diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStats.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStats.java index 615b3a467e6e..3012efffcd2b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStats.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStats.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase; import java.util.Collections; +import java.util.IdentityHashMap; import java.util.Map; import java.util.stream.Collectors; import org.apache.hadoop.hbase.client.RegionInfo; @@ -28,7 +29,7 @@ public final class CacheEvictionStats { private final long evictedBlocks; private final long maxCacheSize; - private final Map exceptions; + private final IdentityHashMap exceptions; CacheEvictionStats(CacheEvictionStatsBuilder builder) { this.evictedBlocks = builder.evictedBlocks; @@ -44,6 +45,7 @@ public long getMaxCacheSize() { return maxCacheSize; } + @SuppressWarnings("IdentityHashMapUsage") public Map getExceptions() { return Collections.unmodifiableMap(exceptions); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsBuilder.java index 4b31d98611bc..025a0ff68329 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsBuilder.java @@ -17,15 +17,15 @@ */ package org.apache.hadoop.hbase; -import java.util.HashMap; -import java.util.Map; +import java.util.IdentityHashMap; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public final class CacheEvictionStatsBuilder { + long evictedBlocks = 0; long maxCacheSize = 0; - Map exceptions = new HashMap<>(); + IdentityHashMap exceptions = new IdentityHashMap<>(); CacheEvictionStatsBuilder() { } @@ -44,6 +44,7 @@ public void addException(byte[] regionName, Throwable ie) { exceptions.put(regionName, ie); } + @SuppressWarnings("IdentityHashMapUsage") public CacheEvictionStatsBuilder append(CacheEvictionStats stats) { this.evictedBlocks += stats.getEvictedBlocks(); this.maxCacheSize += stats.getMaxCacheSize(); @@ -54,4 +55,5 @@ public CacheEvictionStatsBuilder append(CacheEvictionStats stats) { public CacheEvictionStats build() { return new CacheEvictionStats(this); } + } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java index 6ad1a18e83da..e2708b9fc42c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java @@ -349,8 +349,8 @@ public static TableState getTableState(Result r) throws IOException { } /** - * @return Deserialized values of <qualifier,regioninfo> pairs taken from column values that - * match the regex 'info:merge.*' in array of cells. + * Return deserialized values of <qualifier,regioninfo> pairs taken from column values that + * match the regex 'info:merge.*' in array of cells. */ @Nullable public static Map getMergeRegionsWithName(Cell[] cells) { @@ -376,8 +376,8 @@ public static Map getMergeRegionsWithName(Cell[] cells) { } /** - * @return Deserialized regioninfo values taken from column values that match the regex - * 'info:merge.*' in array of cells. 
+ * Return deserialized regioninfo values taken from column values that match the regex + * 'info:merge.*' in array of cells. */ @Nullable public static List getMergeRegions(Cell[] cells) { @@ -386,8 +386,8 @@ public static List getMergeRegions(Cell[] cells) { } /** - * @return True if any merge regions present in cells; i.e. the column in - * cell matches the regex 'info:merge.*'. + * Return true if any merge regions present in cells; i.e. the column in + * cell matches the regex 'info:merge.*'. */ public static boolean hasMergeRegions(Cell[] cells) { for (Cell cell : cells) { @@ -399,7 +399,7 @@ public static boolean hasMergeRegions(Cell[] cells) { } /** - * @return True if the column in cell matches the regex 'info:merge.*'. + * Return true if the column in cell matches the regex 'info:merge.*'. */ public static boolean isMergeQualifierPrefix(Cell cell) { // Check to see if has family and that qualifier starts with the merge qualifier 'merge' diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java index e948048325fe..d02273dac066 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java @@ -59,6 +59,7 @@ private ClientMetaTableAccessor() { } @InterfaceAudience.Private + @SuppressWarnings("ImmutableEnumChecker") public enum QueryType { ALL(HConstants.TABLE_FAMILY, HConstants.CATALOG_FAMILY), REGION(HConstants.CATALOG_FAMILY), @@ -101,8 +102,7 @@ public static CompletableFuture> getTableState(AsyncTable> @@ -127,8 +127,7 @@ public static CompletableFuture> getTableState(AsyncTable> @@ -167,8 +166,8 @@ private static Optional getTableState(Result r) throws IOException { } /** - * Used to get all region locations for the specific table. n * @param tableName table we're - * looking for, can be null for getting all regions + * Used to get all region locations for the specific table we're looking for. Can be null for + * getting all regions. * @return the list of region locations. The return value will be wrapped by a * {@link CompletableFuture}. */ @@ -191,9 +190,8 @@ public static CompletableFuture> getTableHRegionLocations( } /** - * Used to get table regions' info and server. n * @param tableName table we're looking for, can - * be null for getting all regions - * @param excludeOfflinedSplitParents don't return split parents + * Used to get table regions' info and server for the table we're looking for. Can be null for + * getting all regions. * @return the list of regioninfos and server. The return value will be wrapped by a * {@link CompletableFuture}. */ @@ -220,24 +218,12 @@ private static CompletableFuture>> getTableReg return future; } - /** - * Performs a scan of META table for given table. n * @param tableName table withing we scan - * @param type scanned part of meta - * @param visitor Visitor invoked against each row - */ private static CompletableFuture scanMeta(AsyncTable metaTable, TableName tableName, QueryType type, final Visitor visitor) { return scanMeta(metaTable, getTableStartRowForMeta(tableName, type), getTableStopRowForMeta(tableName, type), type, Integer.MAX_VALUE, visitor); } - /** - * Performs a scan of META table for given table. 
n * @param startRow Where to start the scan - * @param stopRow Where to stop the scan - * @param type scanned part of meta - * @param maxRows maximum rows to return - * @param visitor Visitor invoked against each row - */ private static CompletableFuture scanMeta(AsyncTable metaTable, byte[] startRow, byte[] stopRow, QueryType type, int maxRows, final Visitor visitor) { int rowUpperLimit = maxRows > 0 ? maxRows : Integer.MAX_VALUE; @@ -383,7 +369,7 @@ public boolean visit(Result r) throws IOException { abstract void add(Result r); /** - * @return Collected results; wait till visits complete to collect all possible results + * Return collected results; wait till visits complete to collect all possible results */ List getResults() { return this.results; @@ -468,6 +454,7 @@ private static Optional getRegionLocations(Result r) { } /** + * Determine the start row for scanning META according to query type * @param tableName table we're working with * @return start row for scanning META according to query type */ @@ -493,6 +480,7 @@ public static byte[] getTableStartRowForMeta(TableName tableName, QueryType type } /** + * Determine the stop row for scanning META according to query type * @param tableName table we're working with * @return stop row for scanning META according to query type */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java index e769e80847f9..e730c044181e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java @@ -34,9 +34,7 @@ public class ClusterId { private final String id; - /** - * New ClusterID. Generates a uniqueid. - */ + /** New ClusterID. Generates a unique UUID. */ public ClusterId() { this(UUID.randomUUID().toString()); } @@ -45,16 +43,16 @@ public ClusterId(final String uuid) { this.id = uuid; } - /** - * @return The clusterid serialized using pb w/ pb magic prefix - */ + /** Return the cluster id serialized using pb with the pb magic prefix */ public byte[] toByteArray() { return ProtobufUtil.prependPBMagic(convert().toByteArray()); } /** + * Parse a serialized representation of the cluster id. * @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix - * @return An instance of {@link ClusterId} made from bytes n * @see #toByteArray() + * @return An instance of {@link ClusterId} made from bytes + * @see #toByteArray() */ public static ClusterId parseFrom(final byte[] bytes) throws DeserializationException { if (ProtobufUtil.isPBMagicPrefix(bytes)) { @@ -74,24 +72,17 @@ public static ClusterId parseFrom(final byte[] bytes) throws DeserializationExce } } - /** - * @return A pb instance to represent this instance. - */ + /** Return a pb instance to represent this instance. 
*/ public ClusterIdProtos.ClusterId convert() { ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder(); return builder.setClusterId(this.id).build(); } - /** - * n * @return A {@link ClusterId} made from the passed in cid - */ + /** Return a {@link ClusterId} made from the passed in cid */ public static ClusterId convert(final ClusterIdProtos.ClusterId cid) { return new ClusterId(cid.getClusterId()); } - /** - * @see java.lang.Object#toString() - */ @Override public String toString() { return this.id; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java index e36010c424ce..82ce0f7d0076 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java @@ -70,28 +70,28 @@ public interface ClusterMetrics { /** - * @return the HBase version string as reported by the HMaster + * Return the HBase version string as reported by the master. */ @Nullable String getHBaseVersion(); /** - * @return the names of region servers on the dead list + * Return the names of region servers on the dead list. */ List getDeadServerNames(); /** - * @return the names of region servers on the decommissioned list + * Return the names of region servers on the decommissioned list. */ List getDecommissionedServerNames(); /** - * @return the names of region servers on the live list + * Return the names of region servers on the live list. */ Map getLiveServerMetrics(); /** - * @return the number of regions deployed on the cluster + * Return the number of regions deployed on the cluster. */ default int getRegionCount() { return getLiveServerMetrics().entrySet().stream() @@ -99,7 +99,7 @@ default int getRegionCount() { } /** - * @return the number of requests since last report + * Return the number of requests since the last report. */ default long getRequestCount() { return getLiveServerMetrics().entrySet().stream() @@ -115,7 +115,7 @@ default long getRequestCount() { ServerName getMasterName(); /** - * @return the names of backup masters + * Return the names of the backup masters. */ List getBackupMasterNames(); @@ -148,7 +148,7 @@ default long getLastMajorCompactionTimestamp(byte[] regionName) { List getServersName(); /** - * @return the average cluster load + * Return the average cluster load. */ default double getAverageLoad() { int serverSize = getLiveServerMetrics().size(); @@ -159,7 +159,7 @@ default double getAverageLoad() { } /** - * Provide region states count for given table. e.g howmany regions of give table are + * Provide region states count for given table. 
e.g how many regions of give table are * opened/closed/rit etc * @return map of table to region states count */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java index 5695f5b65ade..7254209487b2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java @@ -67,13 +67,13 @@ public static ClusterStatusProtos.ClusterStatus toClusterStatus(ClusterMetrics m .collect(Collectors.toList())) .addAllTableRegionStatesCount(metrics.getTableRegionStatesCount().entrySet().stream() .map(status -> ClusterStatusProtos.TableRegionStatesCount.newBuilder() - .setTableName(ProtobufUtil.toProtoTableName((status.getKey()))) + .setTableName(ProtobufUtil.toProtoTableName(status.getKey())) .setRegionStatesCount(ProtobufUtil.toTableRegionStatesCount(status.getValue())).build()) .collect(Collectors.toList())) .addAllDecommissionedServers(metrics.getDecommissionedServerNames().stream() .map(ProtobufUtil::toServerName).collect(Collectors.toList())); if (metrics.getMasterName() != null) { - builder.setMaster(ProtobufUtil.toServerName((metrics.getMasterName()))); + builder.setMaster(ProtobufUtil.toServerName(metrics.getMasterName())); } if (metrics.getMasterTasks() != null) { builder.addAllMasterTasks(metrics.getMasterTasks().stream() diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java index edbc5f479d6e..64b40e3caf5d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java @@ -28,29 +28,25 @@ @InterfaceStability.Evolving public interface CoprocessorEnvironment { - /** @return the Coprocessor interface version */ + /** Return the coprocessor interface version */ int getVersion(); - /** @return the HBase version as a string (e.g. "0.21.0") */ + /** Return the HBase version as a string (e.g. "0.21.0") */ String getHBaseVersion(); - /** @return the loaded coprocessor instance */ + /** Return the loaded coprocessor instance */ C getInstance(); - /** @return the priority assigned to the loaded coprocessor */ + /** Return the priority assigned to the loaded coprocessor */ int getPriority(); - /** @return the load sequence number */ + /** Return the load sequence number */ int getLoadSequence(); - /** - * @return a Read-only Configuration; throws {@link UnsupportedOperationException} if you try to - * set a configuration. - */ + /** Return a read-only Configuration. 
*/ Configuration getConfiguration(); - /** - * @return the classloader for the loaded coprocessor instance - */ + /** Return the classloader for the loaded coprocessor instance */ ClassLoader getClassLoader(); + } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java index 2e4ebbd0baa6..f7c463aecaf1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java @@ -45,8 +45,8 @@ public HBaseServerException(boolean serverOverloaded, String message) { } /** + * Return true if the server was considered overloaded when the exception was thrown * @param t throwable to check for server overloaded state - * @return True if the server was considered overloaded when the exception was thrown */ public static boolean isServerOverloaded(Throwable t) { if (t instanceof HBaseServerException) { @@ -56,6 +56,8 @@ public static boolean isServerOverloaded(Throwable t) { } /** + * Set or clear the server overloaded status. + *

* Necessary for parsing RemoteException on client side * @param serverOverloaded True if server was overloaded when exception was thrown */ @@ -63,9 +65,7 @@ public void setServerOverloaded(boolean serverOverloaded) { this.serverOverloaded = serverOverloaded; } - /** - * @return True if server was considered overloaded when exception was thrown - */ + /** Return true if server was considered overloaded when exception was thrown */ public boolean isServerOverloaded() { return serverOverloaded; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java index 0decb58bc20b..34395e2f0243 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java @@ -100,8 +100,8 @@ public long getSeqNum() { } /** - * @return String made of hostname and port formatted as per - * {@link Addressing#createHostAndPortStr(String, int)} + * Return a string made of hostname and port formatted as per + * {@link Addressing#createHostAndPortStr(String, int)} */ public String getHostnamePort() { return Addressing.createHostAndPortStr(this.getHostname(), this.getPort()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java index 4d6dd6d43fa3..4c0390c6c3be 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java @@ -208,6 +208,7 @@ public RegionLocations removeElementsWithNullLocation() { * @param other the locations to merge with * @return an RegionLocations object with merged locations or the same object if nothing is merged */ + @SuppressWarnings("ReferenceEquality") public RegionLocations mergeLocations(RegionLocations other) { assert other != null; @@ -280,6 +281,7 @@ private HRegionLocation selectRegionLocation(HRegionLocation oldLocation, * @return an RegionLocations object with updated locations or the same object if nothing is * updated */ + @SuppressWarnings("ReferenceEquality") public RegionLocations updateLocation(HRegionLocation location, boolean checkForEquals, boolean force) { assert location != null; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java index d873c4bc1cb4..b5e4ad44a3db 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java @@ -28,160 +28,118 @@ @InterfaceAudience.Public public interface RegionMetrics { - /** - * @return the region name - */ + /** Return the region name */ byte[] getRegionName(); - /** - * @return the number of stores - */ + /** Return the number of stores */ int getStoreCount(); - /** - * @return the number of storefiles - */ + /** Return the number of storefiles */ int getStoreFileCount(); - /** - * @return the total size of the storefiles - */ + /** Return the total size of the storefiles */ Size getStoreFileSize(); - /** - * @return the memstore size - */ + /** Return the memstore size */ Size getMemStoreSize(); - /** - * @return the number of read requests made to region - */ + /** Return the number of read requests made to region */ long getReadRequestCount(); - /** - * @return the number of write requests made to region - */ + /** Return the number of write 
requests made to region */ long getWriteRequestCount(); - /** - * @return the number of coprocessor service requests made to region - */ + /** Return the number of coprocessor service requests made to region */ public long getCpRequestCount(); /** - * @return the number of write requests and read requests and coprocessor service requests made to - * region + * Return the number of write requests and read requests and coprocessor service requests made to + * region */ default long getRequestCount() { return getReadRequestCount() + getWriteRequestCount() + getCpRequestCount(); } - /** - * @return the region name as a string - */ + /** Return the region name as a string */ default String getNameAsString() { return Bytes.toStringBinary(getRegionName()); } - /** - * @return the number of filtered read requests made to region - */ + /** Return the number of filtered read requests made to region */ long getFilteredReadRequestCount(); /** + * Return the current total size of root-level indexes for the region. + *

* TODO: why we pass the same value to different counters? Currently, the value from * getStoreFileIndexSize() is same with getStoreFileRootLevelIndexSize() see * HRegionServer#createRegionLoad. - * @return The current total size of root-level indexes for the region */ Size getStoreFileIndexSize(); - /** - * @return The current total size of root-level indexes for the region - */ + /** Return the current total size of root-level indexes for the region */ Size getStoreFileRootLevelIndexSize(); - /** - * @return The total size of all index blocks, not just the root level - */ + /** Return the total size of all index blocks, not just the root level */ Size getStoreFileUncompressedDataIndexSize(); - /** - * @return The total size of all Bloom filter blocks, not just loaded into the block cache - */ + /** Return the total size of all Bloom filter blocks, not just loaded into the block cache */ Size getBloomFilterSize(); - /** - * @return the total number of cells in current compaction - */ + /** Return the total number of cells in current compaction */ long getCompactingCellCount(); - /** - * @return the number of already compacted kvs in current compaction - */ + /** Return the number of already compacted kvs in current compaction */ long getCompactedCellCount(); /** + * Return the completed sequence Id for the region. + *

* This does not really belong inside RegionLoad but its being done in the name of expediency. - * @return the completed sequence Id for the region */ long getCompletedSequenceId(); - /** - * @return completed sequence id per store. - */ + /** Return the completed sequence id per store. */ Map getStoreSequenceId(); - /** - * @return the uncompressed size of the storefiles - */ + /** Return the uncompressed size of the storefiles */ Size getUncompressedStoreFileSize(); - /** - * @return the data locality of region in the regionserver. - */ + /** Return the data locality of region in the regionserver. */ float getDataLocality(); - /** - * @return the timestamp of the oldest hfile for any store of this region. - */ + /** Return the timestamp of the oldest hfile for any store of this region. */ long getLastMajorCompactionTimestamp(); - /** - * @return the reference count for the stores of this region - */ + /** Return the reference count for the stores of this region */ int getStoreRefCount(); /** - * @return the max reference count for any store file among all compacted stores files of this - * region + * Return the max reference count for any store file among all compacted stores files of this + * region */ int getMaxCompactedStoreFileRefCount(); /** + * Return the data locality for ssd of region in the regionserver. + *

* Different from dataLocality,this metric's numerator only include the data stored on ssd - * @return the data locality for ssd of region in the regionserver */ float getDataLocalityForSsd(); - /** - * @return the data at local weight of this region in the regionserver - */ + /** Return the data at local weight of this region in the regionserver */ long getBlocksLocalWeight(); /** + * Return the data at local with ssd weight of this region in the regionserver + *

* Different from blocksLocalWeight,this metric's numerator only include the data stored on ssd - * @return the data at local with ssd weight of this region in the regionserver */ long getBlocksLocalWithSsdWeight(); - /** - * @return the block total weight of this region - */ + /** Return the block total weight of this region */ long getBlocksTotalWeight(); - /** - * @return the compaction state of this region - */ + /** Return the compaction state of this region */ CompactionState getCompactionState(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java index 38286afa2d15..7f1e60da90fd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java @@ -31,103 +31,84 @@ @InterfaceAudience.Public public interface ServerMetrics { + /** Return the server name **/ ServerName getServerName(); - /** - * @return the version number of a regionserver. - */ + /** Return the version number of the regionserver. */ default int getVersionNumber() { return 0; } - /** - * @return the string type version of a regionserver. - */ + /** Return the string formatted version of the regionserver. */ default String getVersion() { return "0.0.0"; } - /** - * @return the number of requests per second. - */ + /** Return the number of requests per second. */ long getRequestCountPerSecond(); - /** - * @return total Number of requests from the start of the region server. - */ + /** Return the total number of requests over the lifetime of the region server. */ long getRequestCount(); - /** - * @return total Number of read requests from the start of the region server. - */ + /** Return the total number of read requests over the lifetime of the region server. */ long getReadRequestsCount(); - /** - * @return total Number of write requests from the start of the region server. - */ + /** Return the total number of write requests over the lifetime of the region server. */ long getWriteRequestsCount(); - /** - * @return the amount of used heap - */ + /** Return the amount of used heap */ Size getUsedHeapSize(); - /** - * @return the maximum allowable size of the heap - */ + /** Return the maximum allowable size of the heap */ Size getMaxHeapSize(); + /** Return the info server port. */ int getInfoServerPort(); /** - * Call directly from client such as hbase shell - * @return the list of ReplicationLoadSource + * Return the list of replication load sources. + *

+ * Used by clients such as the hbase shell */ List getReplicationLoadSourceList(); /** - * Call directly from client such as hbase shell - * @return a map of ReplicationLoadSource list per peer id + * Return the map of replication load sources as a list per peer id. + *

+ * Used by clients such as the hbase shell */ Map> getReplicationLoadSourceMap(); /** - * Call directly from client such as hbase shell n + * Return the replication load sink. + *

+ * Used by clients such as the hbase shell */ @Nullable ReplicationLoadSink getReplicationLoadSink(); - /** - * @return region load metrics - */ + /** Return the region load metrics */ Map getRegionMetrics(); /** - * @return metrics per user + * Return the region metrics per user */ Map getUserMetrics(); - /** - * Return the RegionServer-level and Region-level coprocessors - * @return string set of loaded RegionServer-level and Region-level coprocessors - */ + /** Return the combined list of RegionServer-level and Region-level coprocessors. */ Set getCoprocessorNames(); - /** - * @return the timestamp (server side) of generating this metrics - */ + /** Return the timestamp (server side) when metrics were last collected. */ long getReportTimestamp(); - /** - * @return the last timestamp (server side) of generating this metrics - */ + /** Return the last timestamp (server side) when metrics were last collected. */ long getLastReportTimestamp(); /** - * Called directly from clients such as the hbase shell - * @return the active monitored tasks + * Return the list of active monitored tasks. + *

+ * Used by clients such as the hbase shell */ @Nullable List getTasks(); - } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java index 99f8520aa362..7ca833453c30 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java @@ -44,10 +44,6 @@ @InterfaceAudience.Private public final class ServerMetricsBuilder { - /** - * @param sn the server name - * @return a empty metrics - */ public static ServerMetrics of(ServerName sn) { return newBuilder(sn).build(); } @@ -300,6 +296,7 @@ public int getVersionNumber() { return versionNumber; } + @Override public String getVersion() { return version; } @@ -414,16 +411,20 @@ public String toString() { int currentMaxCompactedStoreFileRefCount = r.getMaxCompactedStoreFileRefCount(); maxCompactedStoreFileRefCount = Math.max(maxCompactedStoreFileRefCount, currentMaxCompactedStoreFileRefCount); - uncompressedStoreFileSizeMB += r.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE); - storeFileSizeMB += r.getStoreFileSize().get(Size.Unit.MEGABYTE); - memStoreSizeMB += r.getMemStoreSize().get(Size.Unit.MEGABYTE); - storefileIndexSizeKB += r.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE); + uncompressedStoreFileSizeMB = (long) (uncompressedStoreFileSizeMB + + r.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE)); + storeFileSizeMB = (long) (storeFileSizeMB + r.getStoreFileSize().get(Size.Unit.MEGABYTE)); + memStoreSizeMB = (long) (memStoreSizeMB + r.getMemStoreSize().get(Size.Unit.MEGABYTE)); + storefileIndexSizeKB = (long) (storefileIndexSizeKB + + r.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE)); readRequestsCount += r.getReadRequestCount(); cpRequestsCount += r.getCpRequestCount(); writeRequestsCount += r.getWriteRequestCount(); filteredReadRequestsCount += r.getFilteredReadRequestCount(); - rootLevelIndexSizeKB += r.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE); - bloomFilterSizeMB += r.getBloomFilterSize().get(Size.Unit.MEGABYTE); + rootLevelIndexSizeKB = (long) (rootLevelIndexSizeKB + + r.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE)); + bloomFilterSizeMB = + (long) (bloomFilterSizeMB + r.getBloomFilterSize().get(Size.Unit.MEGABYTE)); compactedCellCount += r.getCompactedCellCount(); compactingCellCount += r.getCompactingCellCount(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java index c248849e3630..692df9681427 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java @@ -70,28 +70,28 @@ public Size(double value, Unit unit) { } /** - * @return size unit + * Return the size unit */ public Unit getUnit() { return unit; } /** - * get the value + * Return the value */ public long getLongValue() { return (long) value; } /** - * get the value + * Return the value */ public double get() { return value; } /** - * get the value which is converted to specified unit. + * Return the value which is converted to specified unit. 
* @param unit size unit * @return the converted value */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java index 2710aa9be273..a67d893e8b01 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java @@ -40,43 +40,31 @@ interface ClientMetrics { long getFilteredReadRequestsCount(); } - /** - * @return the user name - */ + /** Return the user name */ byte[] getUserName(); - /** - * @return the number of read requests made by user - */ + /** Return the number of read requests made by user */ long getReadRequestCount(); - /** - * @return the number of write requests made by user - */ + /** Return the number of write requests made by user */ long getWriteRequestCount(); /** - * @return the number of write requests and read requests and coprocessor service requests made by - * the user + * Return the number of write requests and read requests and coprocessor service requests made by + * the user */ default long getRequestCount() { return getReadRequestCount() + getWriteRequestCount(); } - /** - * @return the user name as a string - */ + /** Return the user name as a string */ default String getNameAsString() { return Bytes.toStringBinary(getUserName()); } - /** - * @return metrics per client(hostname) - */ + /** Return user metrics per client(hostname) */ Map getClientMetrics(); - /** - * @return count of filtered read requests for a user - */ + /** Return the number of filtered read requests for a user */ long getFilteredReadRequests(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java index ab63f19fec85..4a66283146d9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase; +import java.nio.charset.StandardCharsets; import java.util.HashMap; import java.util.Map; import org.apache.hadoop.hbase.util.Strings; @@ -30,7 +31,8 @@ public final class UserMetricsBuilder { public static UserMetrics toUserMetrics(ClusterStatusProtos.UserLoad userLoad) { - UserMetricsBuilder builder = UserMetricsBuilder.newBuilder(userLoad.getUserName().getBytes()); + UserMetricsBuilder builder = + UserMetricsBuilder.newBuilder(userLoad.getUserName().getBytes(StandardCharsets.UTF_8)); userLoad.getClientMetricsList().stream() .map(clientMetrics -> new ClientMetricsImpl(clientMetrics.getHostName(), clientMetrics.getReadRequestsCount(), clientMetrics.getWriteRequestsCount(), diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java index bb44defbac6a..ede1ff31c2b0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java @@ -26,12 +26,8 @@ abstract class AbstractResponse { public enum ResponseType { - - SINGLE(0), - MULTI(1); - - ResponseType(int value) { - } + SINGLE, + MULTI; } public abstract ResponseType type(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java index 2380335e56b0..df14421e5444 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java @@ -133,10 +133,8 @@ private void populateStubs(Set addrs) throws IOException { * For describing the actual asynchronous rpc call. *

* Typically, you can use lambda expression to implement this interface as - * - *

-   * (c, s, d) -> s.xxx(c, your request here, d)
-   * 
+ *

+ * {@code (c, s, d) -> s.xxx(c, your request here, d) } */ @FunctionalInterface protected interface Callable { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index d2b57476128c..a296651dfaf6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -111,12 +111,11 @@ public interface Admin extends Abortable, Closeable { @Override boolean isAborted(); - /** - * @return Connection used by this object. - */ + /** Return the connection used by this object. */ Connection getConnection(); /** + * Return true if the table already exists. * @param tableName Table to check. * @return true if table exists already. * @throws IOException if a remote or network exception occurs @@ -189,7 +188,7 @@ default TableName[] listTableNames(Pattern pattern) throws IOException { * Get a table descriptor. * @param tableName as a {@link TableName} * @return the tableDescriptor - * @throws org.apache.hadoop.hbase.TableNotFoundException + * @throws org.apache.hadoop.hbase.TableNotFoundException if the table is not found * @throws IOException if a remote or network exception occurs */ TableDescriptor getDescriptor(TableName tableName) throws TableNotFoundException, IOException; @@ -383,20 +382,23 @@ default void disableTable(TableName tableName) throws IOException { } /** + * Return true if the table is online * @param tableName name of table to check - * @return true if table is on-line + * @return true if table is online * @throws IOException if a remote or network exception occurs */ boolean isTableEnabled(TableName tableName) throws IOException; /** + * Return true if the table is offline * @param tableName name of table to check - * @return true if table is off-line + * @return true if table is offline * @throws IOException if a remote or network exception occurs */ boolean isTableDisabled(TableName tableName) throws IOException; /** + * Return true if all regions of the table are available * @param tableName name of table to check * @return true if all regions of the table are available * @throws IOException if a remote or network exception occurs @@ -1102,7 +1104,7 @@ default ClusterMetrics getClusterMetrics() throws IOException { ClusterMetrics getClusterMetrics(EnumSet

* The {@code stubMaker} is just a delegation to the {@code newStub} call. Usually it is only a * one line lambda expression, like: - * - *

-   * 
-   * channel -> xxxService.newStub(channel)
-   * 
-   * 
- * + *

+ * {@code channel -> xxxService.newStub(channel) } * @param stubMaker a delegation to the actual {@code newStub} call. * @param callable a delegation to the actual protobuf rpc call. See the comment of * {@link ServiceCaller} for more details. @@ -1434,13 +1442,8 @@ CompletableFuture coprocessorService(Function stubMaker *

* The {@code stubMaker} is just a delegation to the {@code newStub} call. Usually it is only a * one line lambda expression, like: - * - *

-   * 
-   * channel -> xxxService.newStub(channel)
-   * 
-   * 
- * + *

+ * {@code channel -> xxxService.newStub(channel) } * @param stubMaker a delegation to the actual {@code newStub} call. * @param callable a delegation to the actual protobuf rpc call. See the comment of * {@link ServiceCaller} for more details. @@ -1647,7 +1650,6 @@ default CompletableFuture> hasUserPermissions(List per /** * Creates a new RegionServer group with the given name * @param groupName the name of the group - * @throws IOException if a remote or network exception occurs */ CompletableFuture addRSGroup(String groupName); @@ -1655,34 +1657,29 @@ default CompletableFuture> hasUserPermissions(List per * Get group info for the given group name * @param groupName the group name * @return group info - * @throws IOException if a remote or network exception occurs */ CompletableFuture getRSGroup(String groupName); /** * Get group info for the given hostPort * @param hostPort HostPort to get RSGroupInfo for - * @throws IOException if a remote or network exception occurs */ CompletableFuture getRSGroup(Address hostPort); /** * Get group info for the given table * @param tableName table name to get RSGroupInfo for - * @throws IOException if a remote or network exception occurs */ CompletableFuture getRSGroup(TableName tableName); /** * Lists current set of RegionServer groups - * @throws IOException if a remote or network exception occurs */ CompletableFuture> listRSGroups(); /** * Get all tables in this RegionServer group. * @param groupName the group name - * @throws IOException if a remote or network exception occurs * @see #getConfiguredNamespacesAndTablesInRSGroup(String) */ CompletableFuture> listTablesInRSGroup(String groupName); @@ -1697,7 +1694,6 @@ default CompletableFuture> hasUserPermissions(List per * in the group 'A', but this method will not return these tables but only the namespace 'nsA', * while the {@link #listTablesInRSGroup(String)} will return all these tables. * @param groupName the group name - * @throws IOException if a remote or network exception occurs * @see #listTablesInRSGroup(String) */ CompletableFuture, List>> @@ -1706,7 +1702,6 @@ default CompletableFuture> hasUserPermissions(List per /** * Remove RegionServer group associated with the given name * @param groupName the group name - * @throws IOException if a remote or network exception occurs */ CompletableFuture removeRSGroup(String groupName); @@ -1716,7 +1711,6 @@ default CompletableFuture> hasUserPermissions(List per * servers to join other clusters. So we need to remove these servers from the group. 2. * Dead/recovering/live servers will be disallowed. * @param servers set of servers to remove - * @throws IOException if a remote or network exception occurs */ CompletableFuture removeServersFromRSGroup(Set

servers); @@ -1724,7 +1718,6 @@ default CompletableFuture> hasUserPermissions(List per * Move given set of servers to the specified target RegionServer group * @param servers set of servers to move * @param groupName the group to move servers to - * @throws IOException if a remote or network exception occurs */ CompletableFuture moveServersToRSGroup(Set
servers, String groupName); @@ -1732,7 +1725,6 @@ default CompletableFuture> hasUserPermissions(List per * Set the RegionServer group for tables * @param tables tables to set group for * @param groupName group name for tables - * @throws IOException if a remote or network exception occurs */ CompletableFuture setRSGroup(Set tables, String groupName); @@ -1740,7 +1732,6 @@ default CompletableFuture> hasUserPermissions(List per * Balance regions in the given RegionServer group * @param groupName the group name * @return BalanceResponse details about the balancer run - * @throws IOException if a remote or network exception occurs */ default CompletableFuture balanceRSGroup(String groupName) { return balanceRSGroup(groupName, BalanceRequest.defaultInstance()); @@ -1751,7 +1742,6 @@ default CompletableFuture balanceRSGroup(String groupName) { * @param groupName the group name * @param request options to define how the balancer should run * @return BalanceResponse details about the balancer run - * @throws IOException if a remote or network exception occurs */ CompletableFuture balanceRSGroup(String groupName, BalanceRequest request); @@ -1759,7 +1749,6 @@ default CompletableFuture balanceRSGroup(String groupName) { * Rename rsgroup * @param oldName old rsgroup name * @param newName new rsgroup name - * @throws IOException if a remote or network exception occurs */ CompletableFuture renameRSGroup(String oldName, String newName); @@ -1767,7 +1756,6 @@ default CompletableFuture balanceRSGroup(String groupName) { * Update RSGroup configuration * @param groupName the group name * @param configuration new configuration of the group name to be set - * @throws IOException if a remote or network exception occurs */ CompletableFuture updateRSGroupConfig(String groupName, Map configuration); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java index de6e967f21c4..d3bec8b3cfbf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java @@ -29,9 +29,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -/** - * @since 2.0.0 - */ @InterfaceAudience.Private public class AsyncAdminRequestRetryingCaller extends AsyncRpcRetryingCaller { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java index 5497b4a0b723..129ef418d8b4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java @@ -223,16 +223,14 @@ default CompletableFuture incrementColumnValue(byte[] row, byte[] family, * This is a fluent style API, the code is like: * *
-   * 
    * table.checkAndMutate(row, family).qualifier(qualifier).ifNotExists().thenPut(put)
-   *     .thenAccept(succ -> {
-   *       if (succ) {
-   *         System.out.println("Check and put succeeded");
-   *       } else {
-   *         System.out.println("Check and put failed");
-   *       }
-   *     });
-   * 
+   *   .thenAccept(succ -> {
+   *     if (succ) {
+   *       System.out.println("Check and put succeeded");
+   *     } else {
+   *       System.out.println("Check and put failed");
+   *     }
+   *   });
    * 
* * @deprecated Since 3.0.0, will be removed in 4.0.0. For internal test use only, do not use it @@ -250,11 +248,13 @@ default CompletableFuture incrementColumnValue(byte[] row, byte[] family, interface CheckAndMutateBuilder { /** + * Supply a specific column qualifier to check. * @param qualifier column qualifier to check. */ CheckAndMutateBuilder qualifier(byte[] qualifier); /** + * Supply a time range to check. * @param timeRange time range to check. */ CheckAndMutateBuilder timeRange(TimeRange timeRange); @@ -273,12 +273,14 @@ default CheckAndMutateBuilder ifEquals(byte[] value) { } /** + * Check for a match against a sequence of bytes. * @param compareOp comparison operator to use * @param value the expected value */ CheckAndMutateBuilder ifMatches(CompareOperator compareOp, byte[] value); /** + * Provide a Put to commit if the check passes. * @param put data to put if check succeeds * @return {@code true} if the new put was executed, {@code false} otherwise. The return value * will be wrapped by a {@link CompletableFuture}. @@ -286,6 +288,7 @@ default CheckAndMutateBuilder ifEquals(byte[] value) { CompletableFuture thenPut(Put put); /** + * Provide a Delete to commit if the check passes. * @param delete data to delete if check succeeds * @return {@code true} if the new delete was executed, {@code false} otherwise. The return * value will be wrapped by a {@link CompletableFuture}. @@ -293,6 +296,7 @@ default CheckAndMutateBuilder ifEquals(byte[] value) { CompletableFuture thenDelete(Delete delete); /** + * Provide a Mutation to commit if the check passes. * @param mutation mutations to perform if check succeeds * @return true if the new mutation was executed, false otherwise. The return value will be * wrapped by a {@link CompletableFuture}. @@ -308,16 +312,13 @@ default CheckAndMutateBuilder ifEquals(byte[] value) { * execute it. This is a fluent style API, the code is like: * *
-   * 
-   * table.checkAndMutate(row, filter).thenPut(put)
-   *     .thenAccept(succ -> {
-   *       if (succ) {
-   *         System.out.println("Check and put succeeded");
-   *       } else {
-   *         System.out.println("Check and put failed");
-   *       }
-   *     });
-   * 
+   * table.checkAndMutate(row, filter).thenPut(put).thenAccept(succ -> {
+   *   if (succ) {
+   *     System.out.println("Check and put succeeded");
+   *   } else {
+   *     System.out.println("Check and put failed");
+   *   }
+   * });
    * 
* * @deprecated Since 3.0.0, will be removed in 4.0.0. For internal test use only, do not use it @@ -335,11 +336,13 @@ default CheckAndMutateBuilder ifEquals(byte[] value) { interface CheckAndMutateWithFilterBuilder { /** + * Supply a time range to check. * @param timeRange time range to check. */ CheckAndMutateWithFilterBuilder timeRange(TimeRange timeRange); /** + * Provide a Put to commit if the check passes. * @param put data to put if check succeeds * @return {@code true} if the new put was executed, {@code false} otherwise. The return value * will be wrapped by a {@link CompletableFuture}. @@ -347,6 +350,7 @@ interface CheckAndMutateWithFilterBuilder { CompletableFuture thenPut(Put put); /** + * Provide a Delete to commit if the check passes. * @param delete data to delete if check succeeds * @return {@code true} if the new delete was executed, {@code false} otherwise. The return * value will be wrapped by a {@link CompletableFuture}. @@ -354,6 +358,7 @@ interface CheckAndMutateWithFilterBuilder { CompletableFuture thenDelete(Delete delete); /** + * Provide a Mutation to commit if the check passes. * @param mutation mutations to perform if check succeeds * @return true if the new mutation was executed, false otherwise. The return value will be * wrapped by a {@link CompletableFuture}. @@ -440,16 +445,14 @@ default ResultScanner getScanner(byte[] family, byte[] qualifier) { *

* *

-   * 
-   * table.scanAll(new Scan().withStartRow(row, false).setLimit(1)).thenAccept(results -> {
+   * table.scanAll(new Scan().withStartRow(row, false).setLimit(1)).thenAccept(results -> {
    *   if (results.isEmpty()) {
-   *      System.out.println("No row after " + Bytes.toStringBinary(row));
+   *     System.out.println("No row after " + Bytes.toStringBinary(row));
    *   } else {
    *     System.out.println("The closest row after " + Bytes.toStringBinary(row) + " is "
-   *         + Bytes.toStringBinary(results.stream().findFirst().get().getRow()));
+   *       + Bytes.toStringBinary(results.stream().findFirst().get().getRow()));
    *   }
    * });
-   * 
    * 
*

* If your result set is very large, you should use other scan method to get a scanner or use @@ -572,13 +575,8 @@ default CompletableFuture> batchAll(List actions) { *

* The {@code stubMaker} is just a delegation to the {@code newStub} call. Usually it is only a * one line lambda expression, like: - * - *

-   * 
-   * channel -> xxxService.newStub(channel)
-   * 
-   * 
- * + *

+ * {@code channel -> xxxService.newStub(channel) } * @param stubMaker a delegation to the actual {@code newStub} call. * @param callable a delegation to the actual protobuf rpc call. See the comment of * {@link ServiceCaller} for more details. @@ -609,7 +607,7 @@ CompletableFuture coprocessorService(Function stubMaker * *

    * locateThenCall(byte[] row) {
-   *   locate(row).whenComplete((location, locateError) -> {
+   *   locate(row).whenComplete((location, locateError) -> {
    *     if (locateError != null) {
    *       callback.onError(locateError);
    *       return;
@@ -621,7 +619,7 @@  CompletableFuture coprocessorService(Function stubMaker
    *     } else {
    *       locateThenCall(region.getEndKey());
    *     }
-   *     sendCall().whenComplete((resp, error) -> {
+   *     sendCall().whenComplete((resp, error) -> {
    *       if (error != null) {
    *         callback.onRegionError(region, error);
    *       } else {
@@ -639,12 +637,14 @@  CompletableFuture coprocessorService(Function stubMaker
   interface CoprocessorCallback {
 
     /**
+     * Invoked when the call against a region completes.
      * @param region the region that the response belongs to
      * @param resp   the response of the coprocessor call
      */
     void onRegionComplete(RegionInfo region, R resp);
 
     /**
+     * Invoked when the call against a region fails.
      * @param region the region that the error belongs to
      * @param error  the response error of the coprocessor call
      */
@@ -675,6 +675,7 @@ interface CoprocessorCallback {
   interface CoprocessorServiceBuilder {
 
     /**
+     * Start the range at a given row, inclusive.
      * @param startKey start region selection with region containing this row, inclusive.
      */
     default CoprocessorServiceBuilder fromRow(byte[] startKey) {
@@ -682,12 +683,14 @@ default CoprocessorServiceBuilder fromRow(byte[] startKey) {
     }
 
     /**
+     * Start the range at a given row.
      * @param startKey  start region selection with region containing this row
      * @param inclusive whether to include the startKey
      */
     CoprocessorServiceBuilder fromRow(byte[] startKey, boolean inclusive);
 
     /**
+     * Stop the range at a given row, exclusive.
      * @param endKey select regions up to and including the region containing this row, exclusive.
      */
     default CoprocessorServiceBuilder toRow(byte[] endKey) {
@@ -695,6 +698,7 @@ default CoprocessorServiceBuilder toRow(byte[] endKey) {
     }
 
     /**
+     * Stop the range at a given row.
      * @param endKey    select regions up to and including the region containing this row
      * @param inclusive whether to include the endKey
      */
@@ -714,13 +718,8 @@ default CoprocessorServiceBuilder toRow(byte[] endKey) {
    * 

* The {@code stubMaker} is just a delegation to the {@code xxxService.newStub} call. Usually it * is only a one line lambda expression, like: - * - *

-   * 
-   * channel -> xxxService.newStub(channel)
-   * 
-   * 
- * + *

+ * {@code channel -> xxxService.newStub(channel) } * @param stubMaker a delegation to the actual {@code newStub} call. * @param callable a delegation to the actual protobuf rpc call. See the comment of * {@link ServiceCaller} for more details. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java index 2e704c5bdc10..e990fa6d65ff 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java @@ -128,6 +128,7 @@ public String toString() { * Selector's internal state. * @param loc the location which causes exception. */ + @Override public void onError(HRegionLocation loc) { ConcurrentNavigableMap tableCache = computeIfAbsent(staleCache, loc.getRegion().getTable(), () -> new ConcurrentSkipListMap<>(BYTES_COMPARATOR)); @@ -159,18 +160,19 @@ private int getRandomReplicaId() { * When it looks up a location, it will call this method to find a replica region to go. For a * normal case, > 99% of region locations from catalog/meta replica will be up to date. In extreme * cases such as region server crashes, it will depends on how fast replication catches up. - * @param tablename table name it looks up + * @param tableName table name it looks up * @param row key it looks up. * @param locateType locateType, Only BEFORE and CURRENT will be passed in. * @return catalog replica id */ - public int select(final TableName tablename, final byte[] row, + @Override + public int select(final TableName tableName, final byte[] row, final RegionLocateType locateType) { Preconditions.checkArgument( locateType == RegionLocateType.BEFORE || locateType == RegionLocateType.CURRENT, "Expected type BEFORE or CURRENT but got: %s", locateType); - ConcurrentNavigableMap tableCache = staleCache.get(tablename); + ConcurrentNavigableMap tableCache = staleCache.get(tableName); // If there is no entry in StaleCache, select a random replica id. if (tableCache == null) { @@ -199,7 +201,7 @@ public int select(final TableName tablename, final byte[] row, (EnvironmentEdgeManager.currentTime() - entry.getValue().getTimestamp()) >= STALE_CACHE_TIMEOUT_IN_MILLISECONDS ) { - LOG.debug("Entry for table {} with startKey {}, {} times out", tablename, entry.getKey(), + LOG.debug("Entry for table {} with startKey {}, {} times out", tableName, entry.getKey(), entry); tableCache.remove(entry.getKey()); return getRandomReplicaId(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java index 67f30177663f..f59f378ede35 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java @@ -33,24 +33,18 @@ * APIs, the code are like: * *

- * 
  * // A CheckAndMutate operation where do the specified action if the column (specified by the
  * // family and the qualifier) of the row equals to the specified value
- * CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(row)
- *   .ifEquals(family, qualifier, value)
- *   .build(put);
+ * CheckAndMutate checkAndMutate =
+ *   CheckAndMutate.newBuilder(row).ifEquals(family, qualifier, value).build(put);
  *
  * // A CheckAndMutate operation where do the specified action if the column (specified by the
  * // family and the qualifier) of the row doesn't exist
- * CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(row)
- *   .ifNotExists(family, qualifier)
- *   .build(put);
+ * CheckAndMutate checkAndMutate =
+ *   CheckAndMutate.newBuilder(row).ifNotExists(family, qualifier).build(put);
  *
  * // A CheckAndMutate operation that performs the specified action if the row matches the filter
- * CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(row)
- *   .ifMatches(filter)
- *   .build(delete);
- * 
+ * CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(row).ifMatches(filter).build(delete);
  * 
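For reference, a minimal sketch of executing a CheckAndMutate built with the fluent API shown above against a Table. The Connection instance, the table name, and the ROW/FAMILY/QUALIFIER/VALUE byte[] constants are assumed placeholders; the types are the client classes covered by this patch plus org.apache.hadoop.hbase.util.Bytes, and the call site is assumed to be in a method that declares IOException.

  // Illustrative sketch only: "connection", the table name and the byte[] constants are placeholders.
  try (Table table = connection.getTable(TableName.valueOf("test_table"))) {
    Put put = new Put(ROW).addColumn(FAMILY, QUALIFIER, Bytes.toBytes("new-value"));
    CheckAndMutate checkAndMutate =
      CheckAndMutate.newBuilder(ROW).ifEquals(FAMILY, QUALIFIER, VALUE).build(put);
    CheckAndMutateResult result = table.checkAndMutate(checkAndMutate);
    if (result.isSuccess()) {
      // the Put was applied because the checked column matched VALUE
    }
  }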
*/ @InterfaceAudience.Public @@ -97,6 +91,7 @@ public Builder ifEquals(byte[] family, byte[] qualifier, byte[] value) { } /** + * Check for a match * @param family family to check * @param qualifier qualifier to check * @param compareOp comparison operator to use @@ -113,6 +108,7 @@ public Builder ifMatches(byte[] family, byte[] qualifier, CompareOperator compar } /** + * Check for a match * @param filter filter to check * @return the CheckAndMutate object */ @@ -122,6 +118,7 @@ public Builder ifMatches(Filter filter) { } /** + * Set a time range to check * @param timeRange time range to check * @return the CheckAndMutate object */ @@ -144,6 +141,7 @@ private void preCheck(Row action) { } /** + * Build a check and mutate operation with a Put to commit if the check succeeds. * @param put data to put if check succeeds * @return a CheckAndMutate object */ @@ -157,6 +155,7 @@ public CheckAndMutate build(Put put) { } /** + * Build a check and mutate operation with a Delete to commit if the check succeeds. * @param delete data to delete if check succeeds * @return a CheckAndMutate object */ @@ -170,6 +169,7 @@ public CheckAndMutate build(Delete delete) { } /** + * Build a check and mutate operation with an Increment to commit if the check succeeds. * @param increment data to increment if check succeeds * @return a CheckAndMutate object */ @@ -183,6 +183,7 @@ public CheckAndMutate build(Increment increment) { } /** + * Build a check and mutate operation with an Append to commit if the check succeeds. * @param append data to append if check succeeds * @return a CheckAndMutate object */ @@ -196,6 +197,7 @@ public CheckAndMutate build(Append append) { } /** + * Build a check and mutate operation with a RowMutations to commit if the check succeeds. * @param mutations mutations to perform if check succeeds * @return a CheckAndMutate object */ @@ -251,7 +253,7 @@ private CheckAndMutate(byte[] row, Filter filter, TimeRange timeRange, Row actio } /** - * @return the row + * Return the row */ @Override public byte[] getRow() { @@ -259,56 +261,56 @@ public byte[] getRow() { } /** - * @return the family to check + * Return the family to check */ public byte[] getFamily() { return family; } /** - * @return the qualifier to check + * Return the qualifier to check */ public byte[] getQualifier() { return qualifier; } /** - * @return the comparison operator + * Return the comparison operator */ public CompareOperator getCompareOp() { return op; } /** - * @return the expected value + * Return the expected value */ public byte[] getValue() { return value; } /** - * @return the filter to check + * Return the filter to check */ public Filter getFilter() { return filter; } /** - * @return whether this has a filter or not + * Return whether this has a filter or not */ public boolean hasFilter() { return filter != null; } /** - * @return the time range to check + * Return the time range to check */ public TimeRange getTimeRange() { return timeRange; } /** - * @return the action done if check succeeds + * Return the action done if check succeeds */ public Row getAction() { return action; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutateResult.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutateResult.java index 88b438a74a05..2717bc14ba46 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutateResult.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutateResult.java @@ -33,14 +33,14 @@ public 
CheckAndMutateResult(boolean success, Result result) { } /** - * @return Whether the CheckAndMutate operation is successful or not + * Return whether the CheckAndMutate operation is successful or not */ public boolean isSuccess() { return success; } /** - * @return It is used only for CheckAndMutate operations with Increment/Append. Otherwise null + * Return the result for CheckAndMutate operations with Increment/Append, or null otherwise */ public Result getResult() { return result; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java index 758cf508578a..a333eec72d6f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java @@ -38,8 +38,8 @@ private ClientIdGenerator() { } /** - * @return a unique ID incorporating IP address, PID, TID and timer. Might be an overkill... Note - * though that new UUID in java by default is just a random number. + * Return a unique ID incorporating IP address, PID, TID and timer. Might be an overkill... Note + * though that new UUID in java by default is just a random number. */ public static byte[] generateClientId() { byte[] selfBytes = getIpAddressBytes(); @@ -58,9 +58,8 @@ public static byte[] generateClientId() { return id; } - /** - * @return PID of the current process, if it can be extracted from JVM name, or null. - */ + /** Return the PID of the current process, if it can be extracted from JVM name, or null. */ + @SuppressWarnings("StringSplitter") public static Long getPid() { String name = ManagementFactory.getRuntimeMXBean().getName(); String[] nameParts = name.split("@"); @@ -77,8 +76,8 @@ public static Long getPid() { } /** - * @return Some IPv4/IPv6 address available on the current machine that is up, not virtual and not - * a loopback address. Empty array if none can be found or error occurred. + * Return an IPv4/IPv6 address available on the current machine that is up, not virtual and not a + * loopback address. Empty array if none can be found or error occurred. 
*/ public static byte[] getIpAddressBytes() { try { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java index cc34d59c7321..322212261bcd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java @@ -19,6 +19,7 @@ import java.io.Closeable; import java.io.IOException; +import java.io.InterruptedIOException; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.net.InetAddress; @@ -26,6 +27,7 @@ import java.net.UnknownHostException; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.ExecutionException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetricsBuilder; @@ -218,16 +220,30 @@ public void connect(Configuration conf) throws IOException { } LOG.debug("Channel bindAddress={}, networkInterface={}, INA={}", bindAddress, ni, ina); - channel.joinGroup(ina, ni, null, channel.newPromise()); + try { + channel.joinGroup(ina, ni, null, channel.newPromise()).get(); + } catch (InterruptedException e) { + throw (IOException) new InterruptedIOException().initCause(e); + } catch (ExecutionException e) { + throw new IOException(e); + } } @Override public void close() { if (channel != null) { - channel.close(); + try { + channel.close().get(); + } catch (InterruptedException | ExecutionException e) { + LOG.warn("Exception while closing", e); + } channel = null; } - group.shutdownGracefully(); + try { + group.shutdownGracefully().get(); + } catch (InterruptedException | ExecutionException e) { + LOG.warn("Exception while shutting down", e); + } } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java index 5f11f8d258a6..d2445e284e95 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java @@ -78,84 +78,85 @@ public interface ColumnFamilyDescriptor { }; /** - * @return The storefile/hfile blocksize for this column family. + * Return the storefile/hfile blocksize for this column family. */ int getBlocksize(); /** - * @return bloom filter type used for new StoreFiles in ColumnFamily + * Return the bloom filter type used for new store files in the column family. */ BloomType getBloomFilterType(); /** - * @return Compression type setting. + * Return the compression type for compactions. */ Compression.Algorithm getCompactionCompressionType(); /** - * @return Compression type setting for major compactions. + * Return the compression type for major compactions. */ Compression.Algorithm getMajorCompactionCompressionType(); /** - * @return Compression type setting for minor compactions. + * Return the compression type for minor compactions. */ Compression.Algorithm getMinorCompactionCompressionType(); /** - * @return Compression type setting. + * Return the compression type. */ Compression.Algorithm getCompressionType(); /** - * @return an unmodifiable map. + * Return the column family attributes as an unmodifiable map. */ Map getConfiguration(); /** + * Get a configuration value by key. 
* @param key the key whose associated value is to be returned * @return accessing the configuration value by key. */ String getConfigurationValue(String key); /** - * @return replication factor set for this CF + * Return the DFS replication factor. */ short getDFSReplication(); /** - * @return the data block encoding algorithm used in block cache and optionally on disk + * Return the data block encoding algorithm used in block cache and optionally on disk. */ DataBlockEncoding getDataBlockEncoding(); /** - * @return Return the raw crypto key attribute for the family, or null if not set + * Return the raw crypto key attribute for the family, or null if not set. */ byte[] getEncryptionKey(); /** - * @return Return the encryption algorithm in use by this family + * Return the encryption algorithm in use by this family. */ String getEncryptionType(); /** - * @return in-memory compaction policy if set for the cf. Returns null if no policy is set for for - * this column family + * Return the in-memory compaction policy if set for the cf. Returns null if no policy is set for + * for this column family */ MemoryCompactionPolicy getInMemoryCompaction(); /** - * @return return the KeepDeletedCells + * Return the current KeepDeletedCells settings. */ KeepDeletedCells getKeepDeletedCells(); /** - * @return maximum number of versions + * Return the maximum number of versions to keep. */ int getMaxVersions(); /** - * @return The minimum number of versions to keep. + * Return the minimum number of versions to keep. */ int getMinVersions(); @@ -172,91 +173,91 @@ public interface ColumnFamilyDescriptor { long getMobThreshold(); /** - * @return a copy of Name of this column family + * Return a copy of the name of this column family */ byte[] getName(); /** - * @return Name of this column family + * Return the name of this column family as a String. */ String getNameAsString(); /** - * @return the scope tag + * Return the scope tag. */ int getScope(); /** - * Not using {@code enum} here because HDFS is not using {@code enum} for storage policy, see - * org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite for more details. - * @return Return the storage policy in use by this family + * Return the storage policy in use by this family. */ String getStoragePolicy(); /** - * @return Time-to-live of cell contents, in seconds. + * Return the time-to-live of cell contents, in seconds. */ int getTimeToLive(); /** + * Get a configuration value by key. * @param key The key. * @return A clone value. Null if no mapping for the key */ Bytes getValue(Bytes key); /** + * Get a configuration value by key. * @param key The key. * @return A clone value. Null if no mapping for the key */ String getValue(String key); /** + * Get a configuration value by key. * @param key The key. * @return A clone value. Null if no mapping for the key */ byte[] getValue(byte[] key); /** - * It clone all bytes of all elements. - * @return All values + * Return a map of all configuration values. */ Map getValues(); /** - * @return True if hfile DATA type blocks should be cached (You cannot disable caching of INDEX - * and BLOOM type blocks). + * Return true if hfile DATA type blocks should be cached (You cannot disable caching of INDEX and + * BLOOM type blocks). */ boolean isBlockCacheEnabled(); /** - * @return true if we should cache bloomfilter blocks on write + * Return true if we should cache bloomfilter blocks on write. 
*/ boolean isCacheBloomsOnWrite(); /** - * @return true if we should cache data blocks on write + * Return true if we should cache data blocks on write. */ boolean isCacheDataOnWrite(); /** - * @return true if we should cache index blocks on write + * Return true if we should cache index blocks on write. */ boolean isCacheIndexesOnWrite(); /** - * @return Whether KV tags should be compressed along with DataBlockEncoding. When no - * DataBlockEncoding is been used, this is having no effect. + * Return whether KV tags should be compressed along with DataBlockEncoding. When no + * DataBlockEncoding is been used, this is having no effect. */ boolean isCompressTags(); /** - * @return true if we should evict cached blocks from the blockcache on close + * Return true if we should evict cached blocks from the blockcache on close */ boolean isEvictBlocksOnClose(); /** - * @return True if we are to favor keeping all values for this column family in the HRegionServer - * cache. + * Return true if we are to favor keeping all values for this column family in the HRegionServer + * cache. */ boolean isInMemory(); @@ -267,12 +268,12 @@ public interface ColumnFamilyDescriptor { boolean isMobEnabled(); /** - * @return true if we should prefetch blocks into the blockcache on open + * Return true if we should prefetch blocks into the blockcache on open */ boolean isPrefetchBlocksOnOpen(); /** - * @return Column family descriptor with only the customized attributes. + * Return the column family descriptor with only the customized attributes. */ String toStringCustomizedValues(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java index 80178027b6f2..d375c34431c9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java @@ -42,9 +42,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchema; -/** - * @since 2.0.0 - */ @InterfaceAudience.Public public class ColumnFamilyDescriptorBuilder { // For future backward compatibility @@ -330,6 +327,7 @@ public static Unit getUnit(String key) { } /** + * Assert the column family name is legal. * @param b Family name. * @return b * @throws IllegalArgumentException If not null and not a legitimate family name: i.e. 'printable' @@ -398,6 +396,7 @@ private ColumnFamilyDescriptorBuilder(final ColumnFamilyDescriptor desc) { } /** + * Serialize the descriptor to a byte array. * @param desc The table descriptor to serialize * @return This instance serialized with pb with pb magic prefix */ @@ -690,11 +689,6 @@ public Map getValues() { return Collections.unmodifiableMap(values); } - /** - * @param key The key. - * @param value The value. - * @return this (for chained invocation) - */ public ModifyableColumnFamilyDescriptor setValue(byte[] key, byte[] value) { return setValue(toBytesOrNull(key, Function.identity()), toBytesOrNull(value, Function.identity())); @@ -708,11 +702,6 @@ private ModifyableColumnFamilyDescriptor setValue(Bytes key, String value) { return setValue(key, toBytesOrNull(value, Bytes::toBytes)); } - /** - * @param key The key. - * @param value The value. 
- * @return this (for chained invocation) - */ private ModifyableColumnFamilyDescriptor setValue(Bytes key, Bytes value) { if (value == null || value.getLength() == 0) { values.remove(key); @@ -749,6 +738,7 @@ public int getMaxVersions() { } /** + * Set the maximum number of versions * @param maxVersions maximum number of versions * @return this (for chained invocation) */ @@ -796,6 +786,7 @@ public int getBlocksize() { } /** + * Set the blocksize to use in storefiles * @param s Blocksize to use when writing out storefiles/hfiles on this column family. * @return this (for chained invocation) */ @@ -814,13 +805,6 @@ public Compression.Algorithm getCompressionType() { n -> Compression.Algorithm.valueOf(n.toUpperCase()), DEFAULT_COMPRESSION); } - /** - * Compression types supported in hbase. LZO is not bundled as part of the hbase distribution. - * See See LZO Compression for - * how to enable it. - * @param type Compression type setting. - * @return this (for chained invocation) - */ public ModifyableColumnFamilyDescriptor setCompressionType(Compression.Algorithm type) { return setValue(COMPRESSION_BYTES, type.name()); } @@ -901,6 +885,7 @@ public boolean isInMemory() { } /** + * Enable or disable in memory preference for the family * @param inMemory True if we are to favor keeping all values for this column family in the * HRegionServer cache * @return this (for chained invocation) @@ -916,7 +901,8 @@ public MemoryCompactionPolicy getInMemoryCompaction() { } /** - * @param inMemoryCompaction the prefered in-memory compaction policy for this column family + * Set the preferred in-memory compaction policy. + * @param inMemoryCompaction the preferred in-memory compaction policy for this column family * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor @@ -931,6 +917,7 @@ public KeepDeletedCells getKeepDeletedCells() { } /** + * Enable the KEEP_DELETED_CELLS feature. * @param keepDeletedCells True if deleted rows should not be collected immediately. * @return this (for chained invocation) */ @@ -959,6 +946,7 @@ public int getTimeToLive() { } /** + * Set the time to live of cell contents for the family * @param timeToLive Time-to-live of cell contents, in seconds. * @return this (for chained invocation) */ @@ -967,9 +955,10 @@ public ModifyableColumnFamilyDescriptor setTimeToLive(int timeToLive) { } /** + * Set the time to live of cell contents for the family * @param timeToLive Time-to-live of cell contents, in seconds. * @return this (for chained invocation) - * @throws org.apache.hadoop.hbase.exceptions.HBaseException + * @throws org.apache.hadoop.hbase.exceptions.HBaseException if the supplied string was invalid */ public ModifyableColumnFamilyDescriptor setTimeToLive(String timeToLive) throws HBaseException { return setTimeToLive(Integer.parseInt(PrettyPrinter.valueOf(timeToLive, Unit.TIME_INTERVAL))); @@ -981,6 +970,7 @@ public int getMinVersions() { } /** + * Set the minimum number of versions to keep * @param minVersions The minimum number of versions to keep. (used when timeToLive is set) * @return this (for chained invocation) */ @@ -1010,6 +1000,7 @@ public boolean isBlockCacheEnabled() { } /** + * Enable or disable the blockcache * @param blockCacheEnabled True if hfile DATA type blocks should be cached (We always cache * INDEX and BLOOM blocks; you cannot turn this off). 
* @return this (for chained invocation) @@ -1035,6 +1026,7 @@ public int getScope() { } /** + * Set the replication scope * @param scope the scope tag * @return this (for chained invocation) */ @@ -1049,6 +1041,7 @@ public boolean isCacheDataOnWrite() { } /** + * Enable or disable caching of data blocks on write * @param value true if we should cache data blocks on write * @return this (for chained invocation) */ @@ -1063,6 +1056,7 @@ public boolean isCacheIndexesOnWrite() { } /** + * Enable or disable caching of index blocks on write * @param value true if we should cache index blocks on write * @return this (for chained invocation) */ @@ -1077,6 +1071,7 @@ public boolean isCacheBloomsOnWrite() { } /** + * Enable or disable caching of bloom filter blocks on write * @param value true if we should cache bloomfilter blocks on write * @return this (for chained invocation) */ @@ -1091,6 +1086,7 @@ public boolean isEvictBlocksOnClose() { } /** + * Enable or disable block eviction at region close time * @param value true if we should evict cached blocks from the blockcache on close * @return this (for chained invocation) */ @@ -1105,6 +1101,7 @@ public boolean isPrefetchBlocksOnOpen() { } /** + * Enable or disable prefetch of data blocks at region open time * @param value true if we should prefetch blocks into the blockcache on open * @return this (for chained invocation) */ @@ -1233,7 +1230,7 @@ public int compareTo(ModifyableColumnFamilyDescriptor other) { } /** - * @return This instance serialized with pb with pb magic prefix + * Return this instance serialized with pb with pb magic prefix * @see #parseFrom(byte[]) */ private byte[] toByteArray() { @@ -1241,10 +1238,11 @@ private byte[] toByteArray() { } /** + * Parse a serialized descriptor * @param bytes A pb serialized {@link ModifyableColumnFamilyDescriptor} instance with pb magic * prefix * @return An instance of {@link ModifyableColumnFamilyDescriptor} made from bytes - * n * @see #toByteArray() + * @see #toByteArray() */ private static ColumnFamilyDescriptor parseFrom(final byte[] bytes) throws DeserializationException { @@ -1290,8 +1288,8 @@ public ModifyableColumnFamilyDescriptor setConfiguration(String key, String valu } /** - * Remove a configuration setting represented by the key from the {@link #configuration} map. n - * * @return this (for chained invocation) + * Remove a configuration setting represented by the key from the {@link #configuration} map. 
+ * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor removeConfiguration(final String key) { return setConfiguration(key, null); @@ -1303,8 +1301,8 @@ public String getEncryptionType() { } /** - * Set the encryption algorithm for use with this family n * @return this (for chained - * invocation) + * Set the encryption algorithm for use with this family + * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setEncryptionType(String algorithm) { return setValue(ENCRYPTION_BYTES, algorithm); @@ -1316,7 +1314,8 @@ public byte[] getEncryptionKey() { } /** - * Set the raw crypto key attribute for the family n * @return this (for chained invocation) + * Set the raw crypto key attribute for the family + * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setEncryptionKey(byte[] keyBytes) { return setValue(ENCRYPTION_KEY_BYTES, new Bytes(keyBytes)); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactType.java index 225bb072db70..54502e7a89c4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactType.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactType.java @@ -25,10 +25,6 @@ */ @InterfaceAudience.Public public enum CompactType { - - NORMAL(0), - MOB(1); - - CompactType(int value) { - } + NORMAL, + MOB; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java index 385007703e2f..b48674faaab3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java @@ -60,7 +60,7 @@ public interface Connection extends Abortable, Closeable { */ /** - * @return Configuration instance being used by this Connection instance. + * Return the configuration of this connection. */ Configuration getConfiguration(); @@ -183,7 +183,7 @@ default BufferedMutator getBufferedMutator(TableName tableName) throws IOExcepti AsyncConnection toAsyncConnection(); /** - * @return the cluster ID unique to this HBase cluster. + * Return the cluster ID unique to this HBase cluster. */ String getClusterId(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptor.java index 3331c8107009..1ea0415e4944 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptor.java @@ -29,22 +29,23 @@ @InterfaceAudience.Public public interface CoprocessorDescriptor { /** - * @return the name of the class or interface represented by this object. + * Return the name of the class or interface represented by this object. */ String getClassName(); /** - * @return Path of the jar file. If it's null, the class will be loaded from default classloader. + * Return the path of the jar file. If it's null, the class will be loaded from default + * classloader. */ Optional getJarPath(); /** - * @return The order to execute this coprocessor + * Return the order to execute this coprocessor */ int getPriority(); /** - * @return Arbitrary key-value parameter pairs passed into the coprocessor. + * Return the coprocessor properties as a map. 
*/ Map getProperties(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java index 755c0ca0b8c0..ecc388cc3c8d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java @@ -111,8 +111,6 @@ public Delete(final byte[] row, final int rowOffset, final int rowLength) { *

* This timestamp is ONLY used for a delete row operation. If specifying families or columns, you * must specify each timestamp individually. - * @param row We make a local copy of this passed in row. nn * @param timestamp maximum version - * timestamp (only for delete row) */ public Delete(final byte[] row, final int rowOffset, final int rowLength, long timestamp) { checkRow(row, rowOffset, rowLength); @@ -121,6 +119,7 @@ public Delete(final byte[] row, final int rowOffset, final int rowLength, long t } /** + * Create a Delete operation given another as template. * @param deleteToCopy delete to copy */ public Delete(final Delete deleteToCopy) { @@ -142,8 +141,9 @@ public Delete(byte[] row, long ts, NavigableMap> familyMap) { /** * Add an existing delete marker to this Delete object. * @param cell An existing cell of type "delete". - * @return this for invocation chaining n + * @return this for invocation chaining */ + @Override public Delete add(Cell cell) throws IOException { super.add(cell); return this; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java index 17975ff631de..0a3bd690063f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java @@ -445,7 +445,7 @@ public boolean equals(Object obj) { if (this == obj) { return true; } - if (obj == null || getClass() != obj.getClass()) { + if (!(obj instanceof Get)) { return false; } Row other = (Row) obj; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java index 11da33d8106a..21b4bbceae72 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java @@ -96,6 +96,7 @@ public Increment(byte[] row, long ts, NavigableMap> familyMap * Add the specified KeyValue to this operation. 
* @param cell individual Cell n * @throws java.io.IOException e */ + @Override public Increment add(Cell cell) throws IOException { super.add(cell); return this; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java index 7804e48de9f0..3f94fc30f9c7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java @@ -29,11 +29,8 @@ @InterfaceAudience.Public public enum IsolationLevel { - READ_COMMITTED(1), - READ_UNCOMMITTED(2); - - IsolationLevel(int value) { - } + READ_COMMITTED, + READ_UNCOMMITTED; public byte[] toBytes() { return new byte[] { toByte() }; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogQueryFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogQueryFilter.java index b2d217da3de0..4cbacaafb55e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogQueryFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogQueryFilter.java @@ -112,8 +112,7 @@ public boolean equals(Object o) { if (this == o) { return true; } - - if (o == null || getClass() != o.getClass()) { + if (!(o instanceof LogQueryFilter)) { return false; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCoprocessorRpcChannelImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCoprocessorRpcChannelImpl.java index 4be0362be85d..f36b3e162187 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCoprocessorRpcChannelImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCoprocessorRpcChannelImpl.java @@ -78,11 +78,11 @@ public void callMethod(MethodDescriptor method, RpcController controller, Messag Message responsePrototype, RpcCallback done) { addListener( callerBuilder.action((c, s) -> rpcCall(method, request, responsePrototype, c, s)).call(), - ((r, e) -> { + (r, e) -> { if (e != null) { ((ClientCoprocessorRpcController) controller).setFailed(e); } done.run(r); - })); + }); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java index a031d3530971..917badef0568 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java @@ -75,6 +75,7 @@ public class MasterRegistry extends AbstractRpcBasedConnectionRegistry { * separated host[:port] values. If no port number if specified, default master port is assumed. * @param conf Configuration to parse from. 
*/ + @SuppressWarnings("StringSplitter") public static Set parseMasterAddrs(Configuration conf) throws UnknownHostException { Set masterAddrs = new HashSet<>(); String configuredMasters = getMasterAddr(conf); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java index 97a01e1bb6ec..b9eee785dfd5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java @@ -44,9 +44,7 @@ public MultiResponse() { super(); } - /** - * @return Number of pairs in this container - */ + /** Return the number of results in this container */ public int size() { int size = 0; for (RegionResult result : results.values()) { @@ -56,25 +54,30 @@ public int size() { } /** - * Add the pair to the container, grouped by the regionName n * @param originalIndex the original - * index of the Action (request). - * @param resOrEx the result or error; will be empty for successful Put and Delete actions. + * Add the pair to the container, grouped by the regionName + * @param regionName the region + * @param originalIndex the original index of the Action (request). + * @param resOrEx the result or error; will be empty for successful Put and Delete actions. */ public void add(byte[] regionName, int originalIndex, Object resOrEx) { getResult(regionName).addResult(originalIndex, resOrEx); } + /** + * Add the pair to the container, grouped by the regionName + * @param regionName the region + * @param ie the error + */ public void addException(byte[] regionName, Throwable ie) { exceptions.put(regionName, ie); } - /** - * @return the exception for the region, if any. Null otherwise. - */ + /** Return the exception for the region, if any. Null otherwise. */ public Throwable getException(byte[] regionName) { return exceptions.get(regionName); } + /** Return all exceptions for the responses, if any. */ public Map getExceptions() { return exceptions; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java index 22114f8f624a..9556bafbc07d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java @@ -126,22 +126,20 @@ private static int checkReplicaId(int regionId) { this.encodedNameAsBytes = Bytes.toBytes(this.encodedName); } - /** - * @return Return a short, printable name for this region (usually encoded name) for us logging. - */ + /** Return a short, printable name for this region (usually encoded name) for us logging. */ @Override public String getShortNameToLog() { return RegionInfo.prettyPrint(this.getEncodedName()); } - /** @return the regionId */ + /** Return the region id */ @Override public long getRegionId() { return regionId; } /** - * @return the regionName as an array of bytes. + * Return the regionName as an array of bytes. * @see #getRegionNameAsString() */ @Override @@ -150,14 +148,14 @@ public byte[] getRegionName() { } /** - * @return Region name as a String for use in logging, etc. + * Return the region name as a String for use in logging, etc. 
*/ @Override public String getRegionNameAsString() { return RegionInfo.getRegionNameAsString(this, this.regionName); } - /** @return the encoded region name */ + /** Return the encoded region name */ @Override public String getEncodedName() { return this.encodedName; @@ -168,28 +166,26 @@ public byte[] getEncodedNameAsBytes() { return this.encodedNameAsBytes; } - /** @return the startKey */ + /** Return the start key */ @Override public byte[] getStartKey() { return startKey; } - /** @return the endKey */ + /** Return the end key */ @Override public byte[] getEndKey() { return endKey; } - /** - * Get current table name of the region n - */ + /** Return the table name of the region */ @Override public TableName getTable() { return this.tableName; } /** - * Returns true if the given inclusive range of rows is fully contained by this region. For + * Return true if the given inclusive range of rows is fully contained by this region. For * example, if the region is foo,a,g and this is passed ["b","c"] or ["a","c"] it will return * true, but if this is passed ["b","z"] it will return false. * @throws IllegalArgumentException if the range passed is invalid (ie. end < start) @@ -208,9 +204,7 @@ public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) { return firstKeyInRange && lastKeyInRange; } - /** - * Return true if the given row falls in this region. - */ + /** Return true if the given row falls in this region. */ @Override public boolean containsRow(byte[] row) { CellComparator cellComparator = CellComparatorImpl.getCellComparator(tableName); @@ -219,30 +213,26 @@ public boolean containsRow(byte[] row) { || Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY)); } - /** @return true if this region is a meta region */ + /** Return true if this region is a meta region */ @Override public boolean isMetaRegion() { return tableName.equals(TableName.META_TABLE_NAME); } - /** - * @return True if has been split and has daughters. - */ + /** Return true if has been split and has daughters. */ @Override public boolean isSplit() { return this.split; } - /** - * @param split set split status n - */ + /** Set set split status */ public MutableRegionInfo setSplit(boolean split) { this.split = split; return this; } /** - * @return True if this region is offline. + * Return true if this region is offline. * @deprecated since 3.0.0 and will be removed in 4.0.0 * @see HBASE-25210 */ @@ -255,7 +245,7 @@ public boolean isOffline() { /** * The parent of a region split is offline while split daughters hold references to the parent. * Offlined regions are closed. - * @param offLine Set online/offline status. n + * @param offLine Set online/offline status. */ public MutableRegionInfo setOffline(boolean offLine) { this.offLine = offLine; @@ -263,7 +253,7 @@ public MutableRegionInfo setOffline(boolean offLine) { } /** - * @return True if this is a split parent region. + * Return true if this is a split parent region. * @deprecated since 3.0.0 and will be removed in 4.0.0, Use {@link #isSplit()} instead. * @see HBASE-25210 */ @@ -279,18 +269,12 @@ public boolean isSplitParent() { return true; } - /** - * Returns the region replica id - * @return returns region replica id - */ + /** Return the region replica id */ @Override public int getReplicaId() { return replicaId; } - /** - * @see Object#toString() - */ @Override public String toString() { return "{ENCODED => " + getEncodedName() + ", " + HConstants.NAME + " => '" @@ -300,9 +284,6 @@ public String toString() { + ((replicaId > 0) ? 
", REPLICA_ID => " + replicaId : "") + "}"; } - /** - * @see Object#equals(Object) - */ @Override public boolean equals(Object o) { if (this == o) { @@ -317,9 +298,6 @@ public boolean equals(Object o) { return compareTo((RegionInfo) o) == 0; } - /** - * @see Object#hashCode() - */ @Override public int hashCode() { return this.hashCode; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java index cecaed3388cc..5484649f03d1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java @@ -156,27 +156,23 @@ List getCellList(byte[] family) { return list; } - /* + /** * Create a KeyValue with this objects row key and the Put identifier. - * @return a KeyValue with this objects row key and the Put identifier. */ KeyValue createPutKeyValue(byte[] family, byte[] qualifier, long ts, byte[] value) { return new KeyValue(this.row, family, qualifier, ts, KeyValue.Type.Put, value); } /** - * Create a KeyValue with this objects row key and the Put identifier. nnnn * @param tags - - * Specify the Tags as an Array - * @return a KeyValue with this objects row key and the Put identifier. + * Create a KeyValue with this objects row key and the Put identifier. */ KeyValue createPutKeyValue(byte[] family, byte[] qualifier, long ts, byte[] value, Tag[] tags) { KeyValue kvWithTag = new KeyValue(this.row, family, qualifier, ts, value, tags); return kvWithTag; } - /* + /** * Create a KeyValue with this objects row key and the Put identifier. - * @return a KeyValue with this objects row key and the Put identifier. */ KeyValue createPutKeyValue(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value, Tag[] tags) { @@ -261,7 +257,7 @@ private static Map cellToStringMap(Cell c) { if (tags != null) { List tagsString = new ArrayList<>(tags.size()); for (Tag t : tags) { - tagsString.add((t.getType()) + ":" + Bytes.toStringBinary(Tag.cloneValue(t))); + tagsString.add(t.getType() + ":" + Bytes.toStringBinary(Tag.cloneValue(t))); } stringMap.put("tag", tagsString); } @@ -327,7 +323,7 @@ public Mutation setClusterIds(List clusterIds) { } /** - * @return the set of clusterIds that have consumed the mutation + * Return the set of clusterIds that have consumed the mutation. */ public List getClusterIds() { List clusterIds = new ArrayList<>(); @@ -343,7 +339,7 @@ public List getClusterIds() { } /** - * Sets the visibility expression associated with cells in this Mutation. n + * Sets the visibility expression associated with cells in this Mutation. */ public Mutation setCellVisibility(CellVisibility expression) { this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY, @@ -352,7 +348,7 @@ public Mutation setCellVisibility(CellVisibility expression) { } /** - * @return CellVisibility associated with cells in this Mutation. n + * Return the cell visibility associated with cells in this Mutation. */ public CellVisibility getCellVisibility() throws DeserializationException { byte[] cellVisibilityBytes = this.getAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY); @@ -360,29 +356,17 @@ public CellVisibility getCellVisibility() throws DeserializationException { return toCellVisibility(cellVisibilityBytes); } - /** - * Create a protocol buffer CellVisibility based on a client CellVisibility. 
n * @return a - * protocol buffer CellVisibility - */ static ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVisibility) { ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder(); builder.setExpression(cellVisibility.getExpression()); return builder.build(); } - /** - * Convert a protocol buffer CellVisibility to a client CellVisibility n * @return the converted - * client CellVisibility - */ private static CellVisibility toCellVisibility(ClientProtos.CellVisibility proto) { if (proto == null) return null; return new CellVisibility(proto.getExpression()); } - /** - * Convert a protocol buffer CellVisibility bytes to a client CellVisibility n * @return the - * converted client CellVisibility n - */ private static CellVisibility toCellVisibility(byte[] protoBytes) throws DeserializationException { if (protoBytes == null) return null; @@ -410,14 +394,14 @@ public int size() { } /** - * @return the number of different families + * Return the number of different families */ public int numFamilies() { return getFamilyCellMap().size(); } /** - * @return Calculate what Mutation adds to class heap size. + * Calculate and return what the Mutation adds to class heap size. */ @Override public long heapSize() { @@ -448,13 +432,14 @@ public long heapSize() { } /** - * @return The serialized ACL for this operation, or null if none + * Return the serialized ACL for this operation, or null if none */ public byte[] getACL() { return getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL); } /** + * Set an ACL for this operation. * @param user User short name * @param perms Permissions for the user */ @@ -465,6 +450,7 @@ public Mutation setACL(String user, Permission perms) { } /** + * Set an ACL for this operation. * @param perms A map of permissions for a user or users */ public Mutation setACL(Map perms) { @@ -499,9 +485,6 @@ public Mutation setTTL(long ttl) { return this; } - /** - * @return current value for returnResults - */ // Used by Increment and Append only. @InterfaceAudience.Private protected boolean isReturnResults() { @@ -509,8 +492,8 @@ protected boolean isReturnResults() { return v == null ? true : Bytes.toBoolean(v); } - @InterfaceAudience.Private // Used by Increment and Append only. + @InterfaceAudience.Private protected Mutation setReturnResults(boolean returnResults) { setAttribute(RETURN_RESULTS, Bytes.toBytes(returnResults)); return this; @@ -608,7 +591,7 @@ public List get(byte[] family, byte[] qualifier) { return filteredList; } - /* + /** * Private method to determine if this object's familyMap contains the given value assigned to the * given family, qualifier and timestamp respecting the 2 boolean arguments nnnnnn * @return * returns true if the given family, qualifier timestamp and value already has an existing diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NonceGenerator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NonceGenerator.java index 3020be221059..75123eefe660 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NonceGenerator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NonceGenerator.java @@ -29,9 +29,9 @@ public interface NonceGenerator { static final String CLIENT_NONCES_ENABLED_KEY = "hbase.client.nonces.enabled"; - /** @return the nonce group (client ID) of this client manager. */ + /** Return the nonce group (client ID) of this client manager. */ long getNonceGroup(); - /** @return New nonce. */ + /** Return a new nonce. 
*/ long newNonce(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java index bcc1bda9ef02..5e821f075461 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java @@ -203,6 +203,7 @@ public Put addColumn(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer va * immutable and its backing array will not be modified for the duration of this Put. * @param cell individual cell n * @throws java.io.IOException e */ + @Override public Put add(Cell cell) throws IOException { super.add(cell); return this; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java index ab0d9887df2b..757842e0d4eb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java @@ -67,7 +67,7 @@ public Query setFilter(Filter filter) { } /** - * Sets the authorizations to be used by this Query n + * Sets the authorizations to be used by this Query. */ public Query setAuthorizations(Authorizations authorizations) { this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY, @@ -76,7 +76,7 @@ public Query setAuthorizations(Authorizations authorizations) { } /** - * @return The authorizations this Query is associated with. n + * Return the authorizations this Query is associated with. */ public Authorizations getAuthorizations() throws DeserializationException { byte[] authorizationsBytes = this.getAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY); @@ -85,6 +85,7 @@ public Authorizations getAuthorizations() throws DeserializationException { } /** + * Get the ACL for the operation. * @return The serialized ACL for this operation, or null if none */ public byte[] getACL() { @@ -92,6 +93,7 @@ public byte[] getACL() { } /** + * Set the ACL for the operation. * @param user User short name * @param perms Permissions for the user */ @@ -102,6 +104,7 @@ public Query setACL(String user, Permission perms) { } /** + * Set the ACL for the operation. * @param perms A map of permissions for a user or users */ public Query setACL(Map perms) { @@ -163,9 +166,8 @@ public Query setIsolationLevel(IsolationLevel level) { } /** - * @return The isolation level of this query. If no isolation level was set for this query object, - * then it returns READ_COMMITTED. - * @return The IsolationLevel for this query + * Return the isolation level of this query. If no isolation level was set for this query object, + * then it returns READ_COMMITTED. 
*/ public IsolationLevel getIsolationLevel() { byte[] attr = getAttribute(ISOLATION_LEVEL); @@ -221,7 +223,7 @@ public Query setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { } /** - * @return A map of column families to time ranges + * Return a map of column families to time ranges */ public Map getColumnFamilyTimeRange() { return this.colFamTimeRangeMap; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 3ec2c741293e..028808ce8baf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -502,11 +502,6 @@ private CompletableFuture procedureCall( return future; } - @FunctionalInterface - private interface TableOperator { - CompletableFuture operate(TableName table); - } - @Override public CompletableFuture tableExists(TableName tableName) { if (TableName.isMetaTableName(tableName)) { @@ -730,6 +725,7 @@ private static CompletableFuture completeCheckTableState( } @Override + @SuppressWarnings("FutureReturnValueIgnored") public CompletableFuture isTableEnabled(TableName tableName) { if (TableName.isMetaTableName(tableName)) { return CompletableFuture.completedFuture(true); @@ -744,6 +740,7 @@ public CompletableFuture isTableEnabled(TableName tableName) { } @Override + @SuppressWarnings("FutureReturnValueIgnored") public CompletableFuture isTableDisabled(TableName tableName) { if (TableName.isMetaTableName(tableName)) { return CompletableFuture.completedFuture(false); @@ -1559,11 +1556,13 @@ public CompletableFuture assign(byte[] regionName) { future.completeExceptionally(err); return; } - addListener(this. newMasterCaller().priority(regionInfo.getTable()) - .action(((controller, stub) -> this. call( - controller, stub, RequestConverter.buildAssignRegionRequest(regionInfo.getRegionName()), - (s, c, req, done) -> s.assignRegion(c, req, done), resp -> null))) - .call(), (ret, err2) -> { + addListener( + this. newMasterCaller().priority(regionInfo.getTable()) + .action((controller, stub) -> this. call( + controller, stub, RequestConverter.buildAssignRegionRequest(regionInfo.getRegionName()), + (s, c, req, done) -> s.assignRegion(c, req, done), resp -> null)) + .call(), + (ret, err2) -> { if (err2 != null) { future.completeExceptionally(err2); } else { @@ -1584,10 +1583,10 @@ public CompletableFuture unassign(byte[] regionName) { } addListener( this. newMasterCaller().priority(regionInfo.getTable()) - .action(((controller, stub) -> this. this. call(controller, stub, RequestConverter.buildUnassignRegionRequest(regionInfo.getRegionName()), - (s, c, req, done) -> s.unassignRegion(c, req, done), resp -> null))) + (s, c, req, done) -> s.unassignRegion(c, req, done), resp -> null)) .call(), (ret, err2) -> { if (err2 != null) { @@ -1608,14 +1607,11 @@ public CompletableFuture offline(byte[] regionName) { future.completeExceptionally(err); return; } - addListener( - this. newMasterCaller().priority(regionInfo.getTable()) - .action(((controller, stub) -> this. call(controller, stub, - RequestConverter.buildOfflineRegionRequest(regionInfo.getRegionName()), - (s, c, req, done) -> s.offlineRegion(c, req, done), resp -> null))) - .call(), - (ret, err2) -> { + addListener(this. newMasterCaller().priority(regionInfo.getTable()) + .action((controller, stub) -> this. 
call( + controller, stub, RequestConverter.buildOfflineRegionRequest(regionInfo.getRegionName()), + (s, c, req, done) -> s.offlineRegion(c, req, done), resp -> null)) + .call(), (ret, err2) -> { if (err2 != null) { future.completeExceptionally(err2); } else { @@ -2236,7 +2232,7 @@ private CompletableFuture internalDeleteSnapshots(Pattern tableNamePattern listSnapshotsFuture = getCompletedSnapshots(tableNamePattern, snapshotNamePattern); } CompletableFuture future = new CompletableFuture<>(); - addListener(listSnapshotsFuture, ((snapshotDescriptions, err) -> { + addListener(listSnapshotsFuture, (snapshotDescriptions, err) -> { if (err != null) { future.completeExceptionally(err); return; @@ -2253,7 +2249,7 @@ private CompletableFuture internalDeleteSnapshots(Pattern tableNamePattern future.complete(v); } }); - })); + }); return future; } @@ -4010,10 +4006,9 @@ public CompletableFuture moveServersToRSGroup(Set

servers, String @Override public CompletableFuture addRSGroup(String groupName) { return this. newMasterCaller() - .action( - ((controller, stub) -> this. call(controller, - stub, AddRSGroupRequest.newBuilder().setRSGroupName(groupName).build(), - (s, c, req, done) -> s.addRSGroup(c, req, done), resp -> null))) + .action((controller, stub) -> this. call( + controller, stub, AddRSGroupRequest.newBuilder().setRSGroupName(groupName).build(), + (s, c, req, done) -> s.addRSGroup(c, req, done), resp -> null)) .call(); } @@ -4081,9 +4076,9 @@ private CompletableFuture> getSlowLogResponseFromServer(ServerNam private CompletableFuture clearSlowLogsResponses(final ServerName serverName) { return this. newAdminCaller() - .action(((controller, stub) -> this.adminCall(controller, stub, + .action((controller, stub) -> this.adminCall(controller, stub, RequestConverter.buildClearSlowLogResponseRequest(), - AdminService.Interface::clearSlowLogsResponses, ProtobufUtil::toClearSlowLogPayload))) + AdminService.Interface::clearSlowLogsResponses, ProtobufUtil::toClearSlowLogPayload)) .serverName(serverName).call(); } @@ -4124,15 +4119,14 @@ Pair, List>> call(controller, stub, @Override public CompletableFuture getRSGroup(Address hostPort) { return this. newMasterCaller() - .action( - ((controller, stub) -> this. call(controller, stub, GetRSGroupInfoOfServerRequest.newBuilder() + .action((controller, stub) -> this. call(controller, stub, + GetRSGroupInfoOfServerRequest.newBuilder() .setServer(HBaseProtos.ServerName.newBuilder().setHostName(hostPort.getHostname()) .setPort(hostPort.getPort()).build()) - .build(), (s, c, req, done) -> s.getRSGroupInfoOfServer(c, req, done), - resp -> resp.hasRSGroupInfo() - ? ProtobufUtil.toGroupInfo(resp.getRSGroupInfo()) - : null))) + .build(), + (s, c, req, done) -> s.getRSGroupInfoOfServer(c, req, done), + resp -> resp.hasRSGroupInfo() ? ProtobufUtil.toGroupInfo(resp.getRSGroupInfo()) : null)) .call(); } @@ -4160,7 +4154,7 @@ public CompletableFuture setRSGroup(Set tables, String groupNam } }); } - addListener(listTableDescriptors(new ArrayList<>(tables)), ((tableDescriptions, err) -> { + addListener(listTableDescriptors(new ArrayList<>(tables)), (tableDescriptions, err) -> { if (err != null) { future.completeExceptionally(err); return; @@ -4184,40 +4178,40 @@ public CompletableFuture setRSGroup(Set tables, String groupNam future.complete(v); } }); - })); + }); return future; } @Override public CompletableFuture getRSGroup(TableName table) { return this. newMasterCaller() - .action(((controller, stub) -> this. this. call(controller, stub, GetRSGroupInfoOfTableRequest.newBuilder() .setTableName(ProtobufUtil.toProtoTableName(table)).build(), (s, c, req, done) -> s.getRSGroupInfoOfTable(c, req, done), - resp -> resp.hasRSGroupInfo() ? ProtobufUtil.toGroupInfo(resp.getRSGroupInfo()) : null))) + resp -> resp.hasRSGroupInfo() ? ProtobufUtil.toGroupInfo(resp.getRSGroupInfo()) : null)) .call(); } @Override public CompletableFuture getRSGroup(String groupName) { return this. newMasterCaller() - .action(((controller, stub) -> this. this. call(controller, stub, GetRSGroupInfoRequest.newBuilder().setRSGroupName(groupName).build(), (s, c, req, done) -> s.getRSGroupInfo(c, req, done), - resp -> resp.hasRSGroupInfo() ? ProtobufUtil.toGroupInfo(resp.getRSGroupInfo()) : null))) + resp -> resp.hasRSGroupInfo() ? ProtobufUtil.toGroupInfo(resp.getRSGroupInfo()) : null)) .call(); } @Override public CompletableFuture renameRSGroup(String oldName, String newName) { return this. 
newMasterCaller() - .action(((controller, stub) -> this. call( + .action((controller, stub) -> this. call( controller, stub, RenameRSGroupRequest.newBuilder().setOldRsgroupName(oldName) .setNewRsgroupName(newName).build(), - (s, c, req, done) -> s.renameRSGroup(c, req, done), resp -> null))) + (s, c, req, done) -> s.renameRSGroup(c, req, done), resp -> null)) .call(); } @@ -4231,9 +4225,9 @@ public CompletableFuture updateRSGroupConfig(String groupName, NameStringPair.newBuilder().setName(e.getKey()).setValue(e.getValue()).build())); } return this. newMasterCaller() - .action(((controller, stub) -> this. this. call(controller, stub, request.build(), - (s, c, req, done) -> s.updateRSGroupConfig(c, req, done), resp -> null))) + (s, c, req, done) -> s.updateRSGroupConfig(c, req, done), resp -> null)) .call(); } @@ -4287,9 +4281,9 @@ public CompletableFuture> getLogEntries(Set serverNam public CompletableFuture flushMasterStore() { FlushMasterStoreRequest.Builder request = FlushMasterStoreRequest.newBuilder(); return this. newMasterCaller() - .action(((controller, stub) -> this. this. call(controller, stub, request.build(), - (s, c, req, done) -> s.flushMasterStore(c, req, done), resp -> null))) + (s, c, req, done) -> s.flushMasterStore(c, req, done), resp -> null)) .call(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java index af0b20908031..ff75c0725ce5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java @@ -776,7 +776,7 @@ private boolean locateFinished(RegionInfo region, byte[] endKey, boolean endKeyI int c = Bytes.compareTo(endKey, region.getEndKey()); // 1. if the region contains endKey // 2. endKey is equal to the region's endKey and we do not want to include endKey. - return c < 0 || c == 0 && !endKeyInclusive; + return c < 0 || (c == 0 && !endKeyInclusive); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorServiceExec.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorServiceExec.java index 4bf726079463..de3bc9a90d4a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorServiceExec.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorServiceExec.java @@ -92,7 +92,7 @@ public boolean equals(Object obj) { if (this == obj) { return true; } - if (obj == null || getClass() != obj.getClass()) { + if (!(obj instanceof RegionCoprocessorServiceExec)) { return false; } return compareTo((RegionCoprocessorServiceExec) obj) == 0; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java index ca1db64719e0..9bbef1cdcc99 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java @@ -152,63 +152,63 @@ public interface RegionInfo extends Comparable { }; /** - * @return Return a short, printable name for this region (usually encoded name) for us logging. + * Return a short, printable name for this region (usually encoded name) for us logging. */ String getShortNameToLog(); /** - * @return the regionId. + * Return the region id. 
*/ long getRegionId(); /** - * @return the regionName as an array of bytes. + * Return the regionName as an array of bytes. * @see #getRegionNameAsString() */ byte[] getRegionName(); /** - * @return Region name as a String for use in logging, etc. + * Return the region name as a String for use in logging, etc. */ String getRegionNameAsString(); /** - * @return the encoded region name. + * Return the encoded region name. */ String getEncodedName(); /** - * @return the encoded region name as an array of bytes. + * Return the encoded region name as an array of bytes. */ byte[] getEncodedNameAsBytes(); /** - * @return the startKey. + * Return the startKey. */ byte[] getStartKey(); /** - * @return the endKey. + * Return the endKey. */ byte[] getEndKey(); /** - * @return current table name of the region + * Return the current table name of the region. */ TableName getTable(); /** - * @return returns region replica id + * Return the region replica id. */ int getReplicaId(); /** - * @return True if has been split and has daughters. + * Return true if the region has been split and has daughters. */ boolean isSplit(); /** - * @return True if this region is offline. + * Return true if this region is offline. * @deprecated since 3.0.0 and will be removed in 4.0.0 * @see HBASE-25210 */ @@ -216,7 +216,7 @@ public interface RegionInfo extends Comparable { boolean isOffline(); /** - * @return True if this is a split parent region. + * Return true if this is a split parent region. * @deprecated since 3.0.0 and will be removed in 4.0.0, Use {@link #isSplit()} instead. * @see HBASE-25210 */ @@ -224,20 +224,21 @@ public interface RegionInfo extends Comparable { boolean isSplitParent(); /** - * @return true if this region is a meta region. + * Return true if this region is a meta region. */ boolean isMetaRegion(); /** - * @return true if the given inclusive range of rows is fully contained by this region. For - * example, if the region is foo,a,g and this is passed ["b","c"] or ["a","c"] it will - * return true, but if this is passed ["b","z"] it will return false. + * Return true if the given inclusive range of rows is fully contained by this region. + *

+ * For example, if the region is foo,a,g and this is passed ["b","c"] or ["a","c"] it will return + * true, but if this is passed ["b","z"] it will return false. * @throws IllegalArgumentException if the range passed is invalid (ie. end < start) */ boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey); /** - * @return true if the given row falls in this region. + * Return true if the given row falls in this region. */ boolean containsRow(byte[] row); @@ -254,7 +255,7 @@ static boolean hasEncodedName(final byte[] regionName) { } /** - * @return the encodedName + * Return the encoded region name. */ @InterfaceAudience.Private static String encodeRegionName(final byte[] regionName) { @@ -295,16 +296,16 @@ static String getRegionNameAsString(@CheckForNull RegionInfo ri, byte[] regionNa } /** - * @return Return a String of short, printable names for hris (usually encoded name) - * for us logging. + * Return a String of short, printable names for hris (usually encoded name) for us + * logging. */ static String getShortNameToLog(RegionInfo... hris) { return getShortNameToLog(Arrays.asList(hris)); } /** - * @return Return a String of short, printable names for hris (usually encoded name) - * for us logging. + * Return a String of short, printable names for hris (usually encoded name) for us + * logging. */ static String getShortNameToLog(final List ris) { return ris.stream().map(RegionInfo::getEncodedName).collect(Collectors.toList()).toString(); @@ -371,7 +372,7 @@ public static boolean isEncodedRegionName(byte[] regionName) { } /** - * @return A deserialized {@link RegionInfo} or null if we failed deserialize or passed bytes null + * Return a deserialized {@link RegionInfo} or null if we failed deserialize or passed bytes null */ @InterfaceAudience.Private static RegionInfo parseFromOrNull(final byte[] bytes) { @@ -380,7 +381,7 @@ static RegionInfo parseFromOrNull(final byte[] bytes) { } /** - * @return A deserialized {@link RegionInfo} or null if we failed deserialize or passed bytes null + * Return a deserialized {@link RegionInfo} or null if we failed deserialize or passed bytes null */ @InterfaceAudience.Private static RegionInfo parseFromOrNull(final byte[] bytes, int offset, int len) { @@ -393,8 +394,8 @@ static RegionInfo parseFromOrNull(final byte[] bytes, int offset, int len) { } /** + * Return a deserialized {@link RegionInfo} * @param bytes A pb RegionInfo serialized with a pb magic prefix. - * @return A deserialized {@link RegionInfo} */ @InterfaceAudience.Private static RegionInfo parseFrom(final byte[] bytes) throws DeserializationException { @@ -403,10 +404,10 @@ static RegionInfo parseFrom(final byte[] bytes) throws DeserializationException } /** + * Return a deserialized {@link RegionInfo} * @param bytes A pb RegionInfo serialized with a pb magic prefix. * @param offset starting point in the byte array * @param len length to read on the byte array - * @return A deserialized {@link RegionInfo} */ @InterfaceAudience.Private static RegionInfo parseFrom(final byte[] bytes, int offset, int len) @@ -447,7 +448,7 @@ static boolean areAdjacent(RegionInfo regionA, RegionInfo regionB) { } /** - * @return This instance serialized as protobuf w/ a magic pb prefix. + * Return this instance serialized as protobuf with a magic pb prefix. 
* @see #parseFrom(byte[]) */ static byte[] toByteArray(RegionInfo ri) { @@ -765,21 +766,21 @@ static List parseDelimitedFrom(final byte[] bytes, final int offset, } /** - * @return True if this is first Region in Table + * Return true if this is first Region in Table */ default boolean isFirst() { return Bytes.equals(getStartKey(), HConstants.EMPTY_START_ROW); } /** - * @return True if this is last Region in Table + * Return true if this is last Region in Table */ default boolean isLast() { return Bytes.equals(getEndKey(), HConstants.EMPTY_END_ROW); } /** - * @return True if region is next, adjacent but 'after' this one. + * Return true if region is next, adjacent but 'after' this one. * @see #isAdjacent(RegionInfo) * @see #areAdjacent(RegionInfo, RegionInfo) */ @@ -788,7 +789,7 @@ default boolean isNext(RegionInfo after) { } /** - * @return True if region is adjacent, either just before or just after this one. + * Return true if region is adjacent, either just before or just after this one. * @see #isNext(RegionInfo) */ default boolean isAdjacent(RegionInfo other) { @@ -796,14 +797,14 @@ default boolean isAdjacent(RegionInfo other) { } /** - * @return True if RegionInfo is degenerate... if startKey > endKey. + * Return true if RegionInfo is degenerate... if startKey > endKey. */ default boolean isDegenerate() { return !isLast() && Bytes.compareTo(getStartKey(), getEndKey()) > 0; } /** - * @return True if an overlap in region range. + * Return true if an overlap in region range. * @see #isDegenerate() */ default boolean isOverlap(RegionInfo other) { @@ -829,7 +830,9 @@ default boolean isOverlap(RegionInfo other) { return Bytes.compareTo(getStartKey(), other.getEndKey()) < 0; } + @Override default int compareTo(RegionInfo other) { return RegionInfo.COMPARATOR.compare(this, other); } + } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java index ea0228209500..850d37daae15 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java @@ -51,7 +51,7 @@ public class RegionReplicaUtil { /** * Returns the RegionInfo for the given replicaId. RegionInfo's correspond to a range of a table, * but more than one "instance" of the same range can be deployed which are differentiated by the - * replicaId. n * @param replicaId the replicaId to use + * replicaId. * @return an RegionInfo object corresponding to the same range (table, start and end key), but * for the given replicaId. 
*/ @@ -73,19 +73,17 @@ public static RegionInfo getRegionInfoForDefaultReplica(RegionInfo regionInfo) { return getRegionInfoForReplica(regionInfo, DEFAULT_REPLICA_ID); } - /** @return true if this replicaId corresponds to default replica for the region */ + /** Return true if this replicaId corresponds to default replica for the region */ public static boolean isDefaultReplica(int replicaId) { return DEFAULT_REPLICA_ID == replicaId; } - /** @return true if this region is a default replica for the region */ + /** Return true if this region is a default replica for the region */ public static boolean isDefaultReplica(RegionInfo hri) { return hri.getReplicaId() == DEFAULT_REPLICA_ID; } - /** - * Removes the non-default replicas from the passed regions collection n - */ + /** Removes the non-default replicas from the passed regions collection */ public static void removeNonDefaultRegions(Collection regions) { Iterator iterator = regions.iterator(); while (iterator.hasNext()) { @@ -149,7 +147,7 @@ public static List addReplicas(final List regions, int o if ((newReplicaCount - 1) <= 0) { return regions; } - List hRegionInfos = new ArrayList<>((newReplicaCount) * regions.size()); + List hRegionInfos = new ArrayList<>(newReplicaCount * regions.size()); for (RegionInfo ri : regions) { if ( RegionReplicaUtil.isDefaultReplica(ri) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCoprocessorRpcChannelImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCoprocessorRpcChannelImpl.java index 29b092cad883..ed5fd16dab3f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCoprocessorRpcChannelImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCoprocessorRpcChannelImpl.java @@ -78,11 +78,11 @@ public void callMethod(MethodDescriptor method, RpcController controller, Messag Message responsePrototype, RpcCallback done) { addListener( callerBuilder.action((c, s) -> rpcCall(method, request, responsePrototype, c, s)).call(), - ((r, e) -> { + (r, e) -> { if (e != null) { ((ClientCoprocessorRpcController) controller).setFailed(e); } done.run(r); - })); + }); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestController.java index 66d864be7d49..9ddddf0711aa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestController.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestController.java @@ -67,9 +67,7 @@ public interface Checker { void reset() throws InterruptedIOException; } - /** - * @return A new checker for evaluating a batch rows. - */ + /** Return a new checker for evaluating a batch rows. */ Checker newChecker(); /** @@ -86,9 +84,7 @@ public interface Checker { */ void decTaskCounters(Collection regions, ServerName sn); - /** - * @return The number of running task. - */ + /** Return The number of running tasks. 
*/ long getNumberOfTasksInProgress(); /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java index 46865380f757..1c379286f954 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java @@ -672,15 +672,12 @@ public boolean isEmpty() { } /** - * @return the size of the underlying Cell [] + * Return the size of the underlying Cell [] */ public int size() { return this.cells == null ? 0 : this.cells.length; } - /** - * n - */ @Override public String toString() { StringBuilder sb = new StringBuilder(); @@ -896,8 +893,8 @@ public void setStatistics(RegionLoadStats loadStats) { } /** - * @return the associated statistics about the region from which this was returned. Can be - * null if stats are disabled. + * Return the associated statistics about the region from which this was returned. Can be + * null if stats are disabled. */ public RegionLoadStats getStats() { return stats; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java index d6017a1e23fd..20c46ef65265 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java @@ -113,8 +113,6 @@ default Result[] next(int nbRows) throws IOException { */ boolean renewLease(); - /** - * @return the scan metrics, or {@code null} if we do not enable metrics. - */ + /** Return the scan metrics, or {@code null} if we do not enable metrics. */ ScanMetrics getScanMetrics(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java index 179903706cae..6fc1e184e88e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Row.java @@ -28,8 +28,5 @@ public interface Row { Comparator COMPARATOR = (v1, v2) -> Bytes.compareTo(v1.getRow(), v2.getRow()); - /** - * @return The row. - */ byte[] getRow(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowAccess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowAccess.java index ba613bb17733..2456337648e0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowAccess.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowAccess.java @@ -22,17 +22,12 @@ /** * Provide a way to access the inner buffer. The purpose is to reduce the elapsed time to move a * large number of elements between collections. - * @param */ @InterfaceAudience.Private public interface RowAccess extends Iterable { - /** - * @return true if there are no elements. - */ + /** Return true if there are no elements. */ boolean isEmpty(); - /** - * @return the number of elements in this list. - */ + /** Return the number of elements in this list. 
*/ int size(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java index 9e9d0f1754e8..a66d0dbfcba6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java @@ -70,6 +70,7 @@ public RowMutations(byte[] row, int initialCapacity) { } /** + * Add a mutation * @param mutation The data to send. * @throws IOException if the row of added mutation doesn't match the original row */ @@ -78,6 +79,7 @@ public RowMutations add(Mutation mutation) throws IOException { } /** + * Add a list of mutations * @param mutations The data to send. * @throws IOException if the row of added mutation doesn't match the original row */ @@ -99,7 +101,7 @@ public byte[] getRow() { } /** - * @return An unmodifiable list of the current mutations. + * Return an unmodifiable list of the current mutations. */ public List getMutations() { return Collections.unmodifiableList(mutations); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java index 51ac7e8d3140..29c81ee1a489 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java @@ -434,6 +434,7 @@ public Scan withStopRow(byte[] stopRow, boolean inclusive) { * {@link #setStartStopRowForPrefixScan(byte[])} instead. */ @Deprecated + @SuppressWarnings("InlineMeSuggester") public Scan setRowPrefixFilter(byte[] rowPrefix) { return setStartStopRowForPrefixScan(rowPrefix); } @@ -531,9 +532,7 @@ public Scan setCaching(int caching) { return this; } - /** - * @return the maximum result size in bytes. See {@link #setMaxResultSize(long)} - */ + /** Return the maximum result size in bytes. 
See {@link #setMaxResultSize(long)} */ public long getMaxResultSize() { return maxResultSize; } @@ -571,9 +570,7 @@ public Map> getFamilyMap() { return this.familyMap; } - /** - * @return the number of families in familyMap - */ + /** Return the number of families in familyMap */ public int numFamilies() { if (hasFamilies()) { return this.familyMap.size(); @@ -581,16 +578,12 @@ public int numFamilies() { return 0; } - /** - * @return true if familyMap is non empty, false otherwise - */ + /** Return true if familyMap is non empty, false otherwise */ public boolean hasFamilies() { return !this.familyMap.isEmpty(); } - /** - * @return the keys of the familyMap - */ + /** Return the keys of the familyMap */ public byte[][] getFamilies() { if (hasFamilies()) { return this.familyMap.keySet().toArray(new byte[0][0]); @@ -598,88 +591,65 @@ public byte[][] getFamilies() { return null; } - /** - * @return the startrow - */ + /** Return the startrow */ public byte[] getStartRow() { return this.startRow; } - /** - * @return if we should include start row when scan - */ + /** Return if we should include start row when scan */ public boolean includeStartRow() { return includeStartRow; } - /** - * @return the stoprow - */ + /** Return the stop row */ public byte[] getStopRow() { return this.stopRow; } - /** - * @return if we should include stop row when scan - */ + /** Return if we should include the stop row when scanning */ public boolean includeStopRow() { return includeStopRow; } - /** - * @return the max number of versions to fetch - */ + /** Return the max number of versions to fetch */ public int getMaxVersions() { return this.maxVersions; } - /** - * @return maximum number of values to return for a single call to next() - */ + /** Return the maximum number of values to return for a single call to next */ public int getBatch() { return this.batch; } - /** - * @return maximum number of values to return per row per CF - */ + /** Return the maximum number of values to return per row per family */ public int getMaxResultsPerColumnFamily() { return this.storeLimit; } - /** - * Method for retrieving the scan's offset per row per column family (#kvs to be skipped) - * @return row offset - */ + /** Return the scan's offset per row per column family (#kvs to be skipped) */ public int getRowOffsetPerColumnFamily() { return this.storeOffset; } /** - * @return caching the number of rows fetched when calling next on a scanner + * Return the the number of rows that will be fetched and cached when calling next on a scanner */ public int getCaching() { return this.caching; } - /** - * n - */ + /** Return the time range */ public TimeRange getTimeRange() { return this.tr; } - /** - * n - */ + /** Return the filter */ @Override public Filter getFilter() { return filter; } - /** - * @return true is a filter has been specified, false if not - */ + /** Return true is a filter has been specified, false if not */ public boolean hasFilter() { return filter != null; } @@ -736,9 +706,9 @@ public Scan setAllowPartialResults(final boolean allowPartialResults) { } /** - * @return true when the constructor of this scan understands that the results they will see may - * only represent a partial portion of a row. The entire row would be retrieved by - * subsequent calls to {@link ResultScanner#next()} + * Return true when the constructor of this scan understands that the results they will see may + * only represent a partial portion of a row. 
The entire row would be retrieved by subsequent + * calls to {@link ResultScanner#next()} */ public boolean getAllowPartialResults() { return allowPartialResults; @@ -840,9 +810,7 @@ public Scan setRaw(boolean raw) { return this; } - /** - * @return True if this Scan is in "raw" mode. - */ + /** Return true if this Scan is in "raw" mode. */ public boolean isRaw() { byte[] attr = getAttribute(RAW_ATTR); return attr == null ? false : Bytes.toBoolean(attr); @@ -902,9 +870,7 @@ public Scan setScanMetricsEnabled(final boolean enabled) { return this; } - /** - * @return True if collection of scan metrics is enabled. For advanced users. - */ + /** Return true if collection of scan metrics is enabled. For advanced users. */ public boolean isScanMetricsEnabled() { byte[] attr = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE); return attr == null ? false : Bytes.toBoolean(attr); @@ -915,6 +881,7 @@ public Boolean isAsyncPrefetch() { } /** + * Disable or enable async prefetch. * @deprecated Since 3.0.0, will be removed in 4.0.0. After building sync client upon async * client, the implementation is always 'async prefetch', so this flag is useless now. */ @@ -924,9 +891,7 @@ public Scan setAsyncPrefetch(boolean asyncPrefetch) { return this; } - /** - * @return the limit of rows for this scan - */ + /** Return the limit of rows for this scan */ public int getLimit() { return limit; } @@ -958,9 +923,7 @@ public enum ReadType { PREAD } - /** - * @return the read type for this scan - */ + /** Return the read type for this scan */ public ReadType getReadType() { return readType; } @@ -976,24 +939,18 @@ public Scan setReadType(ReadType readType) { return this; } - /** - * Get the mvcc read point used to open a scanner. - */ + /** Return the mvcc read point used to open the scanner. */ long getMvccReadPoint() { return mvccReadPoint; } - /** - * Set the mvcc read point used to open a scanner. - */ + /** Set the mvcc read point used to open the scanner. */ Scan setMvccReadPoint(long mvccReadPoint) { this.mvccReadPoint = mvccReadPoint; return this; } - /** - * Set the mvcc read point to -1 which means do not use it. - */ + /** Set the mvcc read point to -1 which means do not use it. */ Scan resetMvccReadPoint() { return setMvccReadPoint(-1L); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultConsumer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultConsumer.java index b574b2c2bd5e..de44f8563d35 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultConsumer.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScanResultConsumer.java @@ -30,6 +30,7 @@ public interface ScanResultConsumer extends ScanResultConsumerBase { /** + * Indicate if you want to terminate the scan process. * @param result the data fetched from HBase service. * @return {@code false} if you want to terminate the scan process. Otherwise {@code true} */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServiceCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServiceCaller.java index 501f412bc575..29647892a999 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServiceCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServiceCaller.java @@ -28,22 +28,16 @@ * Usually, it is just a simple lambda expression, like: * *

- * 
- * (stub, controller, rpcCallback) -> {
- *   XXXRequest request = ...; // prepare the request
- *   stub.xxx(controller, request, rpcCallback);
- * }
- * 
- * 
+ * (stub, controller, rpcCallback) -> { XXXRequest request = ...; // prepare the request
+ *   stub.xxx(controller, request, rpcCallback); }
  *
+ *
  * And if already have the {@code request}, the lambda expression will be:
  *
  * 
- * 
  * (stub, controller, rpcCallback) -> stub.xxx(controller, request, rpcCallback)
- * 
- * 
  *
+ *
  * @param  the type of the protobuf Service you want to call.
  * @param  the type of the return value.
  */
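A minimal sketch of the lambda shape this javadoc describes. The endpoint and message types below (SumService, SumRequest, SumResponse) are hypothetical stand-ins for a generated coprocessor service and are not part of this patch; only the (stub, controller, rpcCallback) shape comes from the ServiceCaller contract.

    // Hypothetical generated endpoint types; swap in your own coprocessor service.
    ServiceCaller<SumService.Interface, SumResponse> caller = (stub, controller, rpcCallback) -> {
      SumRequest request = SumRequest.newBuilder().setFamily("cf").build(); // prepare the request
      stub.getSum(controller, request, rpcCallback);                        // hand the callback to the stub
    };

If the request already exists, the one-liner form from the javadoc applies unchanged: (stub, controller, rpcCallback) -> stub.getSum(controller, request, rpcCallback).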
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SlowLogParams.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SlowLogParams.java
index 3311539c2614..b1460c0b116c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SlowLogParams.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SlowLogParams.java
@@ -62,13 +62,10 @@ public boolean equals(Object o) {
     if (this == o) {
       return true;
     }
-
-    if (o == null || getClass() != o.getClass()) {
+    if (!(o instanceof SlowLogParams)) {
       return false;
     }
-
     SlowLogParams that = (SlowLogParams) o;
-
     return new EqualsBuilder().append(regionName, that.regionName).append(params, that.params)
       .isEquals();
   }
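A side note on the equals() rewrite above: the instanceof test also rejects null, so the removed explicit null check is redundant rather than lost. A small sketch of the behaviour being relied on (the two-argument constructor shape is assumed here for illustration):

    SlowLogParams p = new SlowLogParams("region-a", "params"); // constructor shape assumed
    p.equals(p);             // true: reference equality short-circuits
    p.equals(null);          // false: null is never an instance of SlowLogParams
    p.equals("some string"); // false: a different type fails the instanceof test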
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java
index 2dfb2fa7a199..aebcf56a34c0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java
@@ -28,6 +28,7 @@
  * The POJO equivalent of HBaseProtos.SnapshotDescription
  */
 @InterfaceAudience.Public
+@SuppressWarnings("InlineMeSuggester")
 public class SnapshotDescription {
   private final String name;
   private final TableName table;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
index 0f93ab21a2c1..5271b0b58f07 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
@@ -30,7 +30,6 @@
 import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
-import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
@@ -302,11 +301,13 @@ default CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) {
   interface CheckAndMutateBuilder {
 
     /**
+     * Add a column qualifier to check.
      * @param qualifier column qualifier to check.
      */
     CheckAndMutateBuilder qualifier(byte[] qualifier);
 
     /**
+     * Add a time range to check.
      * @param timeRange timeRange to check
      */
     CheckAndMutateBuilder timeRange(TimeRange timeRange);
@@ -325,24 +326,28 @@ default CheckAndMutateBuilder ifEquals(byte[] value) {
     }
 
     /**
+     * Check for a match.
      * @param compareOp comparison operator to use
      * @param value     the expected value
      */
     CheckAndMutateBuilder ifMatches(CompareOperator compareOp, byte[] value);
 
     /**
+     * Provide a Put to commit if the check succeeds.
      * @param put data to put if check succeeds
      * @return {@code true} if the new put was executed, {@code false} otherwise.
      */
     boolean thenPut(Put put) throws IOException;
 
     /**
+     * Provide a Delete to commit if the check succeeds.
      * @param delete data to delete if check succeeds
      * @return {@code true} if the new delete was executed, {@code false} otherwise.
      */
     boolean thenDelete(Delete delete) throws IOException;
 
     /**
+     * Provide a RowMutations to commit if the check succeeds.
      * @param mutation mutations to perform if check succeeds
      * @return true if the new mutation was executed, false otherwise.
      */
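Taken together, the builder methods documented in this hunk compose into a single fluent call; a short sketch (row, family, qualifier and values are placeholders, and table is an open Table instance):

    byte[] row = Bytes.toBytes("row-1");
    byte[] family = Bytes.toBytes("cf");
    byte[] qualifier = Bytes.toBytes("q");
    // Apply the Put only if cf:q still holds the expected value.
    boolean applied = table.checkAndMutate(row, family)
      .qualifier(qualifier)
      .ifEquals(Bytes.toBytes("expected"))
      .thenPut(new Put(row).addColumn(family, qualifier, Bytes.toBytes("updated")));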
@@ -355,13 +360,8 @@ default CheckAndMutateBuilder ifEquals(byte[] value) {
    * 

 * Use the returned {@link CheckAndMutateWithFilterBuilder} to construct your request and then
 * execute it. This is a fluent style API, the code is like:
- *
- *

-   * 
-   * table.checkAndMutate(row, filter).thenPut(put);
-   * 
-   * 
- * + *

+ * {@code table.checkAndMutate(row, filter).thenPut(put); } * @deprecated Since 3.0.0, will be removed in 4.0.0. For internal test use only, do not use it * any more. */ @@ -379,23 +379,27 @@ default CheckAndMutateWithFilterBuilder checkAndMutate(byte[] row, Filter filter interface CheckAndMutateWithFilterBuilder { /** + * Add a time range to check * @param timeRange timeRange to check */ CheckAndMutateWithFilterBuilder timeRange(TimeRange timeRange); /** + * Provide a Put to commit if the check succeeds. * @param put data to put if check succeeds * @return {@code true} if the new put was executed, {@code false} otherwise. */ boolean thenPut(Put put) throws IOException; /** + * Provide a Delete to commit if the check succeeds. * @param delete data to delete if check succeeds * @return {@code true} if the new delete was executed, {@code false} otherwise. */ boolean thenDelete(Delete delete) throws IOException; /** + * Provide a RowMutations to commit if the check succeeds. * @param mutation mutations to perform if check succeeds * @return true if the new mutation was executed, false otherwise. */ @@ -445,7 +449,7 @@ default Result mutateRow(final RowMutations rm) throws IOException { * write operations to a row are synchronized, and readers are guaranteed to see this operation * fully completed. * @param append object that specifies the columns and values to be appended - * @throws IOException e + * @throws IOException if a remote or network exception occurs. * @return values of columns after the append operation (maybe null) */ default Result append(final Append append) throws IOException { @@ -460,7 +464,7 @@ default Result append(final Append append) throws IOException { * fully completed. * @param increment object that specifies the columns and amounts to be used for the increment * operations - * @throws IOException e + * @throws IOException if a remote or network exception occurs. * @return values of columns after the increment */ default Result increment(final Increment increment) throws IOException { @@ -531,7 +535,6 @@ default void close() throws IOException { * used to access a published coprocessor {@link Service} using standard protobuf service * invocations: *

- *

* *
    * CoprocessorRpcChannel channel = myTable.coprocessorService(rowkey);
@@ -542,7 +545,6 @@ default void close() throws IOException {
    * MyCallResponse response = service.myCall(null, request);
    * 
* - *
* @param row The row key used to identify the remote region location * @return A CoprocessorRpcChannel instance * @deprecated since 3.0.0, will removed in 4.0.0. This is too low level, please stop using it any @@ -660,7 +662,7 @@ default Map batchCoprocessorService( final Map results = Collections.synchronizedMap(new TreeMap(Bytes.BYTES_COMPARATOR)); batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype, - new Callback() { + new Batch.Callback() { @Override public void update(byte[] region, byte[] row, R result) { if (region != null) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java index ea60b07d63eb..6d49c313ea86 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java @@ -141,7 +141,7 @@ public interface TableDescriptor { int getPriority(); /** - * @return Returns the configured replicas per region + * Return the configured number of replicas per region. */ int getRegionReplication(); @@ -188,7 +188,7 @@ public interface TableDescriptor { String getValue(String key); /** - * @return Getter for fetching an unmodifiable map. + * Return a getter for fetching an unmodifiable map. */ Map getValues(); @@ -207,7 +207,7 @@ public interface TableDescriptor { boolean hasColumnFamily(final byte[] name); /** - * @return true if the read-replicas memstore replication is enabled. + * Return true if the read-replicas memstore replication is enabled. */ boolean hasRegionMemStoreReplication(); @@ -273,8 +273,8 @@ public interface TableDescriptor { boolean isReadOnly(); /** - * @return Name of this table and then a map of all of the column family descriptors (with only - * the non-default column family attributes) + * Return the name of this table and then a map of all of the column family descriptors (with only + * the non-default column family attributes) */ String toStringCustomizedValues(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java index cf12923326c1..9b737d33ae5f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java @@ -143,8 +143,6 @@ public class TableDescriptorBuilder { private static final Bytes REGION_MEMSTORE_REPLICATION_KEY = new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION)); - private static final Bytes REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY = - new Bytes(Bytes.toBytes(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY)); /** * Used by shell/rest interface to access this metadata attribute which denotes if the table * should be treated by region normalizer. @@ -303,6 +301,7 @@ public static PrettyPrinter.Unit getUnit(String key) { private final ModifyableTableDescriptor desc; /** + * Serialize the descriptor to a byte array * @param desc The table descriptor to serialize * @return This instance serialized with pb with pb magic prefix */ @@ -317,7 +316,7 @@ public static byte[] toByteArray(TableDescriptor desc) { * The input should be created by {@link #toByteArray}. 
* @param pbBytes A pb serialized TableDescriptor instance with pb magic prefix * @return This instance serialized with pb with pb magic prefix - * @throws org.apache.hadoop.hbase.exceptions.DeserializationException + * @throws org.apache.hadoop.hbase.exceptions.DeserializationException if an error occurred */ public static TableDescriptor parseFrom(byte[] pbBytes) throws DeserializationException { return ModifyableTableDescriptor.parseFrom(pbBytes); @@ -1083,9 +1082,7 @@ public String toStringCustomizedValues() { return s.toString(); } - /** - * @return map of all table attributes formatted into string. - */ + /** Return a map of all table attributes formatted as strings. */ public String toStringTableAttributes() { return getValues(true).toString(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java index d4db9eb49e67..e1565f18159a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableOverAsyncTable.java @@ -47,8 +47,7 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RetriesExhaustedException.ThrowableWithExtraContext; -import org.apache.hadoop.hbase.client.coprocessor.Batch.Call; -import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback; +import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.client.trace.TableOperationSpanBuilder; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.TimeRange; @@ -143,8 +142,8 @@ public void batch(List actions, Object[] results) throws IOExcept } @Override - public void batchCallback(List actions, Object[] results, Callback callback) - throws IOException, InterruptedException { + public void batchCallback(List actions, Object[] results, + Batch.Callback callback) throws IOException, InterruptedException { ConcurrentLinkedQueue errors = new ConcurrentLinkedQueue<>(); CountDownLatch latch = new CountDownLatch(actions.size()); AsyncTableRegionLocator locator = conn.getRegionLocator(getName()); @@ -467,7 +466,7 @@ private interface StubCall { } private void coprocessorService(String serviceName, byte[] startKey, byte[] endKey, - Callback callback, StubCall call) throws Throwable { + Batch.Callback callback, StubCall call) throws Throwable { // get regions covered by the row range ExecutorService pool = Context.current().wrap(this.poolSupplier.get()); List keys = getStartKeysInRange(startKey, endKey); @@ -509,7 +508,8 @@ private void coprocessorService(String serviceName, byte[] startKey, byte[] @Override public void coprocessorService(Class service, byte[] startKey, - byte[] endKey, Call callable, Callback callback) throws ServiceException, Throwable { + byte[] endKey, Batch.Call callable, Batch.Callback callback) + throws ServiceException, Throwable { final Supplier supplier = new TableOperationSpanBuilder(conn) .setTableName(table.getName()).setOperation(HBaseSemanticAttributes.Operation.COPROC_EXEC); TraceUtil.trace(() -> { @@ -526,8 +526,8 @@ public void coprocessorService(Class service, byte[] s @SuppressWarnings("unchecked") @Override public void batchCoprocessorService(MethodDescriptor methodDescriptor, - Message request, byte[] startKey, byte[] endKey, R responsePrototype, Callback callback) - throws ServiceException, Throwable { + Message request, byte[] 
startKey, byte[] endKey, R responsePrototype, + Batch.Callback callback) throws ServiceException, Throwable { final Supplier supplier = new TableOperationSpanBuilder(conn) .setTableName(table.getName()).setOperation(HBaseSemanticAttributes.Operation.COPROC_EXEC); TraceUtil.trace(() -> { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java index ffd8cf8409d4..57b20f9ce62e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java @@ -95,42 +95,42 @@ public HBaseProtos.TableState.State convert() { private final State state; /** - * @return True if table is {@link State#ENABLED}. + * Return true if table is {@link State#ENABLED}. */ public boolean isEnabled() { return isInStates(State.ENABLED); } /** - * @return True if table is {@link State#ENABLING}. + * Return true if table is {@link State#ENABLING}. */ public boolean isEnabling() { return isInStates(State.ENABLING); } /** - * @return True if {@link State#ENABLED} or {@link State#ENABLING} + * Return true if {@link State#ENABLED} or {@link State#ENABLING} */ public boolean isEnabledOrEnabling() { return isInStates(State.ENABLED, State.ENABLING); } /** - * @return True if table is disabled. + * Return true if table is disabled. */ public boolean isDisabled() { return isInStates(State.DISABLED); } /** - * @return True if table is disabling. + * Return true if table is disabling. */ public boolean isDisabling() { return isInStates(State.DISABLING); } /** - * @return True if {@link State#DISABLED} or {@link State#DISABLED} + * Return true if {@link State#DISABLED} or {@link State#DISABLED} */ public boolean isDisabledOrDisabling() { return isInStates(State.DISABLED, State.DISABLING); @@ -147,7 +147,7 @@ public TableState(TableName tableName, State state) { } /** - * @return table state + * Return the table state. */ public State getState() { return state; @@ -223,15 +223,19 @@ public boolean isInStates(State... target) { @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - + if (this == o) { + return true; + } + if (!(o instanceof TableState)) { + return false; + } TableState that = (TableState) o; - - if (state != that.state) return false; - if (tableName != null ? !tableName.equals(that.tableName) : that.tableName != null) + if (state != that.state) { return false; - + } + if (tableName != null ? !tableName.equals(that.tableName) : that.tableName != null) { + return false; + } return true; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicy.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicy.java index ee66bcada1ba..3a6467d40361 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicy.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicy.java @@ -33,7 +33,7 @@ public interface ClientBackoffPolicy { public static final String BACKOFF_POLICY_CLASS = "hbase.client.statistics.backoff-policy"; /** - * @return the number of ms to wait on the client based on the + * Return the number of milliseconds to wait on the client based on the provided statistics. 
*/ public long getBackoffTime(ServerName serverName, byte[] region, ServerStatistics stats); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicyFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicyFactory.java index a786702b1693..5b8873a3af82 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicyFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ClientBackoffPolicyFactory.java @@ -22,15 +22,11 @@ import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; @InterfaceAudience.Private @InterfaceStability.Evolving public final class ClientBackoffPolicyFactory { - private static final Logger LOG = LoggerFactory.getLogger(ClientBackoffPolicyFactory.class); - private ClientBackoffPolicyFactory() { } @@ -42,9 +38,7 @@ public static ClientBackoffPolicy create(Configuration conf) { new Class[] { Configuration.class }, new Object[] { conf }); } - /** - * Default backoff policy that doesn't create any backoff for the client, regardless of load - */ + /** Default backoff policy that doesn't create any backoff for the client, regardless of load. */ public static class NoBackoffPolicy implements ClientBackoffPolicy { public NoBackoffPolicy(Configuration conf) { // necessary to meet contract diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java index aa84207e1ed1..0e3339948eaa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ExponentialClientBackoffPolicy.java @@ -21,8 +21,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; @@ -33,8 +31,6 @@ @InterfaceAudience.Public public class ExponentialClientBackoffPolicy implements ClientBackoffPolicy { - private static final Logger LOG = LoggerFactory.getLogger(ExponentialClientBackoffPolicy.class); - private static final long ONE_MINUTE = 60 * 1000; public static final long DEFAULT_MAX_BACKOFF = 5 * ONE_MINUTE; public static final String MAX_BACKOFF_KEY = "hbase.client.exponential-backoff.max"; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BigDecimalColumnInterpreter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BigDecimalColumnInterpreter.java index 900f96440dca..1e42be4baad8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BigDecimalColumnInterpreter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/BigDecimalColumnInterpreter.java @@ -80,7 +80,7 @@ public BigDecimal getMaxValue() { @Override public BigDecimal increment(BigDecimal bd) { - return bd == null ? null : (bd.add(BigDecimal.ONE)); + return bd == null ? 
null : bd.add(BigDecimal.ONE); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java index 4145a348b08c..e789a9fa54f6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java @@ -107,6 +107,7 @@ public static String convertToString(Map * Convert string to TableCFs Object. This is only for read TableCFs information from TableCF * node. Input String Format: ns1.table1:cf1,cf2;ns2.table2:cfA,cfB;ns3.table3. */ + @SuppressWarnings("StringSplitter") public static ReplicationProtos.TableCF[] convert(String tableCFsConfig) { if (tableCFsConfig == null || tableCFsConfig.trim().length() == 0) { return null; @@ -241,6 +242,7 @@ public static Map> convert2Map(ReplicationProtos.TableCF } /** + * Parse the serialized representation of a peer * @param bytes Content of a peer znode. * @return ClusterKey parsed from the passed bytes. * @throws DeserializationException deserialization exception @@ -384,6 +386,7 @@ public static ReplicationProtos.ReplicationPeer convert(ReplicationPeerConfig pe } /** + * Serialize the peer configuration and return it as a byte array. * @param peerConfig peer config of replication peer * @return Serialized protobuf of peerConfig with pb magic prefix prepended suitable * for use as content of a this.peersZNode; i.e. the content of PEER_ID znode under diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java index 0e24e0c98143..af9f5d9ff444 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java @@ -40,11 +40,6 @@ * {@link ColumnInterpreter#castToReturnType(Object)} which takes a <T> type and returns a * <S> type. The AggregateIm>lementation uses PB messages to initialize the user's * ColumnInterpreter implementation, and for sending the responses back to AggregationClient. 
- * @param T Cell value data type - * @param S Promoted data type - * @param P PB message that is used to transport initializer specific bytes - * @param Q PB message that is used to transport Cell (<T>) instance - * @param R PB message that is used to transport Promoted (<S>) instance */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java index 6d28f3288fdc..1c845fe3db16 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java @@ -67,37 +67,27 @@ public PreemptiveFastFailException(long count, long timeOfFirstFailureMilliSec, this.guaranteedClientSideOnly = guaranteedClientSideOnly; } - /** - * @return time of the fist failure - */ + /** Return time of the first failure */ public long getFirstFailureAt() { return timeOfFirstFailureMilliSec; } - /** - * @return time of the latest attempt - */ + /** Return time of the latest attempt */ public long getLastAttemptAt() { return timeOfLatestAttemptMilliSec; } - /** - * @return failure count - */ + /** Return the failure count */ public long getFailureCount() { return failureCount; } - /** - * @return true if operation was attempted by server, false otherwise. - */ + /** Return true if operation was attempted by server, false otherwise. */ public boolean wasOperationAttemptedByServer() { return false; } - /** - * @return true if we know no mutation made it to the server, false otherwise. - */ + /** Return true if we know no mutation made it to the server, false otherwise. */ public boolean isGuaranteedClientSideOnly() { return guaranteedClientSideOnly; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionOpeningException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionOpeningException.java index 347c6b987a15..bef80b2cb82d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionOpeningException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionOpeningException.java @@ -20,8 +20,6 @@ import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * Subclass if the server knows the region is now on another server. 
This allows the client to call @@ -30,7 +28,7 @@ @InterfaceAudience.Private @InterfaceStability.Evolving public class RegionOpeningException extends NotServingRegionException { - private static final Logger LOG = LoggerFactory.getLogger(RegionOpeningException.class); + private static final long serialVersionUID = -7232903522310558395L; public RegionOpeningException(String message) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BigDecimalComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BigDecimalComparator.java index bfd285975ff4..6caef8b35ca4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BigDecimalComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BigDecimalComparator.java @@ -45,12 +45,12 @@ public BigDecimalComparator(BigDecimal value) { @Override public boolean equals(Object obj) { - if (obj == null || !(obj instanceof BigDecimalComparator)) { - return false; - } if (this == obj) { return true; } + if (!(obj instanceof BigDecimalComparator)) { + return false; + } BigDecimalComparator bdc = (BigDecimalComparator) obj; return this.bigDecimal.equals(bdc.bigDecimal); } @@ -72,9 +72,7 @@ public int compareTo(ByteBuffer value, int offset, int length) { return this.bigDecimal.compareTo(that); } - /** - * @return The comparator serialized using pb - */ + /** Return The comparator serialized using pb */ @Override public byte[] toByteArray() { ComparatorProtos.BigDecimalComparator.Builder builder = @@ -84,9 +82,10 @@ public byte[] toByteArray() { } /** + * Parse a serialized representation of the comparator * @param pbBytes A pb serialized {@link BigDecimalComparator} instance * @return An instance of {@link BigDecimalComparator} made from bytes - * @throws DeserializationException A deserialization exception + * @throws DeserializationException if an error occurred * @see #toByteArray */ public static BigDecimalComparator parseFrom(final byte[] pbBytes) @@ -102,10 +101,10 @@ public static BigDecimalComparator parseFrom(final byte[] pbBytes) } /** - * @param other the other comparator - * @return true if and only if the fields of the comparator that are serialized are equal to the - * corresponding fields in other. Used for testing. + * Return true if and only if the fields of the comparator that are serialized are equal to the + * corresponding fields in other. 
*/ + @SuppressWarnings("ReferenceEquality") boolean areSerializedFieldsEqual(BigDecimalComparator other) { if (other == this) { return true; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java index 0c8274a86110..d12a274686c0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComparator.java @@ -54,9 +54,7 @@ public int compareTo(ByteBuffer value, int offset, int length) { return ByteBufferUtils.compareTo(this.value, 0, this.value.length, value, offset, length); } - /** - * @return The comparator serialized using pb - */ + /** Return the comparator serialized using pb */ @Override public byte[] toByteArray() { ComparatorProtos.BinaryComparator.Builder builder = @@ -66,9 +64,10 @@ public byte[] toByteArray() { } /** + * Parse a serialized representation of this comparator * @param pbBytes A pb serialized {@link BinaryComparator} instance - * @return An instance of {@link BinaryComparator} made from bytes n * @see - * #toByteArray + * @return An instance of {@link BinaryComparator} made from bytes + * @see #toByteArray */ public static BinaryComparator parseFrom(final byte[] pbBytes) throws DeserializationException { ComparatorProtos.BinaryComparator proto; @@ -81,8 +80,8 @@ public static BinaryComparator parseFrom(final byte[] pbBytes) throws Deserializ } /** - * n * @return true if and only if the fields of the comparator that are serialized are equal to - * the corresponding fields in other. Used for testing. + * Return true if and only if the fields of the comparator that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(ByteArrayComparable other) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComponentComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComponentComparator.java index 2546227fce35..355af6e7c58f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComponentComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryComponentComparator.java @@ -75,9 +75,7 @@ public int hashCode() { return result; } - /** - * @return The comparator serialized using pb - */ + /** Return the comparator serialized using pb */ @Override public byte[] toByteArray() { ComparatorProtos.BinaryComponentComparator.Builder builder = @@ -88,6 +86,7 @@ public byte[] toByteArray() { } /** + * Parse a serialized representation of this comparator * @param pbBytes A pb serialized {@link BinaryComponentComparator} instance * @return An instance of {@link BinaryComponentComparator} made from bytes * @throws DeserializationException DeserializationException @@ -105,9 +104,8 @@ public static BinaryComponentComparator parseFrom(final byte[] pbBytes) } /** - * @param other paramemter to compare against - * @return true if and only if the fields of the comparator that are serialized are equal to the - * corresponding fields in other. Used for testing. + * Return true if and only if the fields of the comparator that are serialized are equal to the + * corresponding fields in other. Used for testing. 
*/ @Override boolean areSerializedFieldsEqual(ByteArrayComparable other) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java index f97fd070be6a..495b9ce73467 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.java @@ -58,9 +58,7 @@ public int compareTo(ByteBuffer value, int offset, int length) { return ByteBufferUtils.compareTo(this.value, 0, this.value.length, value, offset, length); } - /** - * @return The comparator serialized using pb - */ + /** Return the comparator serialized using pb */ @Override public byte[] toByteArray() { ComparatorProtos.BinaryPrefixComparator.Builder builder = @@ -70,9 +68,10 @@ public byte[] toByteArray() { } /** + * Parse a serialized representation of this comparator * @param pbBytes A pb serialized {@link BinaryPrefixComparator} instance - * @return An instance of {@link BinaryPrefixComparator} made from bytes n * @see - * #toByteArray + * @return An instance of {@link BinaryPrefixComparator} made from bytes + * @see #toByteArray */ public static BinaryPrefixComparator parseFrom(final byte[] pbBytes) throws DeserializationException { @@ -86,8 +85,8 @@ public static BinaryPrefixComparator parseFrom(final byte[] pbBytes) } /** - * n * @return true if and only if the fields of the comparator that are serialized are equal to - * the corresponding fields in other. Used for testing. + * Return true if and only if the fields of the comparator that are serialized are equal to the + * corresponding fields in other. */ @Override boolean areSerializedFieldsEqual(ByteArrayComparable other) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java index 15ca8890abac..6a5bb534b114 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/BitComparator.java @@ -57,16 +57,12 @@ public BitComparator(byte[] value, BitwiseOp bitOperator) { this.bitOperator = bitOperator; } - /** - * @return the bitwise operator - */ + /** Return the bitwise operator */ public BitwiseOp getOperator() { return bitOperator; } - /** - * @return The comparator serialized using pb - */ + /** Return the comparator serialized using pb */ @Override public byte[] toByteArray() { ComparatorProtos.BitComparator.Builder builder = ComparatorProtos.BitComparator.newBuilder(); @@ -78,8 +74,10 @@ public byte[] toByteArray() { } /** + * Parse a serialized representation of this comparator * @param pbBytes A pb serialized {@link BitComparator} instance - * @return An instance of {@link BitComparator} made from bytes n * @see #toByteArray + * @return An instance of {@link BitComparator} made from bytes + * @see #toByteArray */ public static BitComparator parseFrom(final byte[] pbBytes) throws DeserializationException { ComparatorProtos.BitComparator proto; @@ -93,8 +91,8 @@ public static BitComparator parseFrom(final byte[] pbBytes) throws Deserializati } /** - * n * @return true if and only if the fields of the comparator that are serialized are equal to - * the corresponding fields in other. Used for testing. 
+ * Return true if and only if the fields of the comparator that are serialized are equal to the + * corresponding fields in other. */ @Override boolean areSerializedFieldsEqual(ByteArrayComparable other) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java index cfaf8c279930..6d78994d4c00 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.java @@ -78,9 +78,7 @@ public static Filter createFilterFromArguments(ArrayList filterArguments return new ColumnCountGetFilter(limit); } - /** - * @return The filter serialized using pb - */ + /** Return the filter serialized using pb */ @Override public byte[] toByteArray() { FilterProtos.ColumnCountGetFilter.Builder builder = @@ -90,6 +88,7 @@ public byte[] toByteArray() { } /** + * Parse a serialized representation of this filter * @param pbBytes A pb serialized {@link ColumnCountGetFilter} instance * @return An instance of {@link ColumnCountGetFilter} made from bytes * @see #toByteArray @@ -106,15 +105,17 @@ public static ColumnCountGetFilter parseFrom(final byte[] pbBytes) } /** - * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * Return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof ColumnCountGetFilter)) return false; - + if (o == this) { + return true; + } + if (!(o instanceof ColumnCountGetFilter)) { + return false; + } ColumnCountGetFilter other = (ColumnCountGetFilter) o; return this.getLimit() == other.getLimit(); } @@ -126,7 +127,7 @@ public String toString() { @Override public boolean equals(Object obj) { - return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj); + return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java index 31f607c22cc8..e9cd67973748 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.java @@ -78,23 +78,14 @@ public ColumnPaginationFilter(final int limit, final byte[] columnOffset) { this.columnOffset = columnOffset; } - /** - * n - */ public int getLimit() { return limit; } - /** - * n - */ public int getOffset() { return offset; } - /** - * n - */ public byte[] getColumnOffset() { return columnOffset; } @@ -151,9 +142,7 @@ public static Filter createFilterFromArguments(ArrayList filterArguments return new ColumnPaginationFilter(limit, offset); } - /** - * @return The filter serialized using pb - */ + /** Return the filter serialized using pb */ @Override public byte[] toByteArray() { FilterProtos.ColumnPaginationFilter.Builder builder = @@ -169,9 +158,10 @@ public byte[] toByteArray() { } /** + * Parse a serialized representation of this filter. 
* @param pbBytes A pb serialized {@link ColumnPaginationFilter} instance - * @return An instance of {@link ColumnPaginationFilter} made from bytes n * @see - * #toByteArray + * @return An instance of {@link ColumnPaginationFilter} made from bytes + * @see #toByteArray */ public static ColumnPaginationFilter parseFrom(final byte[] pbBytes) throws DeserializationException { @@ -188,15 +178,17 @@ public static ColumnPaginationFilter parseFrom(final byte[] pbBytes) } /** - * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * Return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. */ @Override boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof ColumnPaginationFilter)) return false; - + if (o == this) { + return true; + } + if (!(o instanceof ColumnPaginationFilter)) { + return false; + } ColumnPaginationFilter other = (ColumnPaginationFilter) o; if (this.columnOffset != null) { return this.getLimit() == other.getLimit() @@ -216,7 +208,7 @@ public String toString() { @Override public boolean equals(Object obj) { - return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj); + return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java index d883c449017c..c196656e0261 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.java @@ -102,9 +102,7 @@ public static Filter createFilterFromArguments(ArrayList filterArguments return new ColumnPrefixFilter(columnPrefix); } - /** - * @return The filter serialized using pb - */ + /** Return the filter serialized using pb. */ @Override public byte[] toByteArray() { FilterProtos.ColumnPrefixFilter.Builder builder = FilterProtos.ColumnPrefixFilter.newBuilder(); @@ -113,9 +111,10 @@ public byte[] toByteArray() { } /** + * Parse a serialized representation of the filter. * @param pbBytes A pb serialized {@link ColumnPrefixFilter} instance * @return An instance of {@link ColumnPrefixFilter} made from bytes - * @throws org.apache.hadoop.hbase.exceptions.DeserializationException + * @throws org.apache.hadoop.hbase.exceptions.DeserializationException if an error occurred * @see #toByteArray */ public static ColumnPrefixFilter parseFrom(final byte[] pbBytes) throws DeserializationException { @@ -129,15 +128,17 @@ public static ColumnPrefixFilter parseFrom(final byte[] pbBytes) throws Deserial } /** - * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * Return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. 
*/ @Override boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof ColumnPrefixFilter)) return false; - + if (o == this) { + return true; + } + if (!(o instanceof ColumnPrefixFilter)) { + return false; + } ColumnPrefixFilter other = (ColumnPrefixFilter) o; return Bytes.equals(this.getPrefix(), other.getPrefix()); } @@ -154,7 +155,7 @@ public String toString() { @Override public boolean equals(Object obj) { - return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj); + return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java index 46465ac6d1f8..51f9a1577b3a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java @@ -65,44 +65,32 @@ public ColumnRangeFilter(final byte[] minColumn, boolean minColumnInclusive, this.maxColumnInclusive = maxColumnInclusive; } - /** - * @return if min column range is inclusive. - */ + /** Return true if min column range is inclusive. */ public boolean isMinColumnInclusive() { return minColumnInclusive; } - /** - * @return if max column range is inclusive. - */ + /** Return true if max column range is inclusive. */ public boolean isMaxColumnInclusive() { return maxColumnInclusive; } - /** - * @return the min column range for the filter - */ + /** Return the min column range for the filter */ public byte[] getMinColumn() { return this.minColumn; } - /** - * @return true if min column is inclusive, false otherwise - */ + /** Return true if min column is inclusive, false otherwise */ public boolean getMinColumnInclusive() { return this.minColumnInclusive; } - /** - * @return the max column range for the filter - */ + /** Return the max column range for the filter */ public byte[] getMaxColumn() { return this.maxColumn; } - /** - * @return true if max column is inclusive, false otherwise - */ + /** Return true if max column is inclusive, false otherwise */ public boolean getMaxColumnInclusive() { return this.maxColumnInclusive; } @@ -155,9 +143,7 @@ public static Filter createFilterFromArguments(ArrayList filterArguments return new ColumnRangeFilter(minColumn, minColumnInclusive, maxColumn, maxColumnInclusive); } - /** - * @return The filter serialized using pb - */ + /** Return the filter serialized using pb */ @Override public byte[] toByteArray() { FilterProtos.ColumnRangeFilter.Builder builder = FilterProtos.ColumnRangeFilter.newBuilder(); @@ -171,9 +157,10 @@ public byte[] toByteArray() { } /** + * Parse a serialized representation of this filter. * @param pbBytes A pb serialized {@link ColumnRangeFilter} instance - * @return An instance of {@link ColumnRangeFilter} made from bytes n * @see - * #toByteArray + * @return An instance of {@link ColumnRangeFilter} made from bytes + * @see #toByteArray */ public static ColumnRangeFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.ColumnRangeFilter proto; @@ -189,9 +176,8 @@ public static ColumnRangeFilter parseFrom(final byte[] pbBytes) throws Deseriali } /** - * @param o filter to serialize. - * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing.
+ * Return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. */ @Override boolean areSerializedFieldsEqual(Filter o) { @@ -222,7 +208,7 @@ public String toString() { @Override public boolean equals(Object obj) { - return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj); + return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java index 0074fe40a3a4..638afd035345 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java @@ -68,30 +68,22 @@ public ColumnValueFilter(final byte[] family, final byte[] qualifier, final Comp this.comparator = Preconditions.checkNotNull(comparator, "Comparator should not be null"); } - /** - * n - */ + /** Return the comparison operator */ public CompareOperator getCompareOperator() { return op; } - /** - * @return the comparator - */ + /** Return the comparator */ public ByteArrayComparable getComparator() { return comparator; } - /** - * @return the column family - */ + /** Return the column family */ public byte[] getFamily() { return family; } - /** - * @return the qualifier - */ + /** Return the qualifier */ public byte[] getQualifier() { return qualifier; } @@ -161,9 +153,7 @@ public static Filter createFilterFromArguments(ArrayList filterArguments return new ColumnValueFilter(family, qualifier, operator, comparator); } - /** - * @return A pb instance to represent this instance. - */ + /** Return a pb instance to represent this instance. 
*/ FilterProtos.ColumnValueFilter convert() { FilterProtos.ColumnValueFilter.Builder builder = FilterProtos.ColumnValueFilter.newBuilder(); @@ -213,7 +203,6 @@ boolean areSerializedFieldsEqual(Filter o) { } else if (!(o instanceof ColumnValueFilter)) { return false; } - ColumnValueFilter other = (ColumnValueFilter) o; return Bytes.equals(this.getFamily(), other.getFamily()) && Bytes.equals(this.getQualifier(), other.getQualifier()) @@ -235,7 +224,7 @@ public String toString() { @Override public boolean equals(Object obj) { - return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj); + return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java index 1f55f8480459..7ee89821df97 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java @@ -69,9 +69,7 @@ public CompareOperator getCompareOperator() { return op; } - /** - * @return the comparator - */ + /** Return the comparator */ public ByteArrayComparable getComparator() { return comparator; } @@ -138,7 +136,7 @@ static boolean compare(final CompareOperator op, int compareResult) { } } - // returns an array of heterogeneous objects + /** Returns an array of heterogeneous objects */ public static ArrayList extractArguments(ArrayList filterArguments) { Preconditions.checkArgument(filterArguments.size() == 2, "Expected 2 but got: %s", filterArguments.size()); @@ -158,9 +156,7 @@ public static ArrayList extractArguments(ArrayList filterArgumen return arguments; } - /** - * @return A pb instance to represent this instance. - */ + /** Return a pb instance to represent this instance. */ FilterProtos.CompareFilter convert() { FilterProtos.CompareFilter.Builder builder = FilterProtos.CompareFilter.newBuilder(); HBaseProtos.CompareType compareOp = CompareType.valueOf(this.op.name()); @@ -170,13 +166,17 @@ FilterProtos.CompareFilter convert() { } /** - * n * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * Return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. 
*/ @Override boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof CompareFilter)) return false; + if (o == this) { + return true; + } + if (!(o instanceof CompareFilter)) { + return false; + } CompareFilter other = (CompareFilter) o; return this.getCompareOperator().equals(other.getCompareOperator()) && (this.getComparator() == other.getComparator() @@ -191,7 +191,7 @@ public String toString() { @Override public boolean equals(Object obj) { - return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj); + return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java index 1f453c6f678d..039b63d860dc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/DependentColumnFilter.java @@ -92,23 +92,17 @@ public DependentColumnFilter(final byte[] family, final byte[] qualifier, this(family, qualifier, dropDependentColumn, CompareOperator.NO_OP, null); } - /** - * @return the column family - */ + /** Return the column family */ public byte[] getFamily() { return this.columnFamily; } - /** - * @return the column qualifier - */ + /** Return the column qualifier */ public byte[] getQualifier() { return this.columnQualifier; } - /** - * @return true if we should drop the dependent column, false otherwise - */ + /** Return true if we should drop the dependent column, false otherwise */ public boolean dropDependentColumn() { return this.dropDependentColumn; } @@ -188,9 +182,7 @@ public static Filter createFilterFromArguments(ArrayList filterArguments } } - /** - * @return The filter serialized using pb - */ + /** Return the filter serialized using pb */ @Override public byte[] toByteArray() { FilterProtos.DependentColumnFilter.Builder builder = @@ -207,9 +199,10 @@ public byte[] toByteArray() { } /** + * Parse a serialized representation of this filter. * @param pbBytes A pb serialized {@link DependentColumnFilter} instance - * @return An instance of {@link DependentColumnFilter} made from bytes n * @see - * #toByteArray + * @return An instance of {@link DependentColumnFilter} made from bytes + * @see #toByteArray */ public static DependentColumnFilter parseFrom(final byte[] pbBytes) throws DeserializationException { @@ -236,16 +229,19 @@ public static DependentColumnFilter parseFrom(final byte[] pbBytes) } /** - * n * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * Return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. 
*/ @edu.umd.cs.findbugs.annotations.SuppressWarnings( value = "RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE") @Override boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof DependentColumnFilter)) return false; - + if (o == this) { + return true; + } + if (!(o instanceof DependentColumnFilter)) { + return false; + } DependentColumnFilter other = (DependentColumnFilter) o; return other != null && super.areSerializedFieldsEqual(other) && Bytes.equals(this.getFamily(), other.getFamily()) @@ -263,7 +259,7 @@ public String toString() { @Override public boolean equals(Object obj) { - return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj); + return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java index 4e682eb1d37b..792bce3c6920 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java @@ -73,9 +73,7 @@ public static Filter createFilterFromArguments(ArrayList filterArguments return new FamilyFilter(compareOp, comparator); } - /** - * @return The filter serialized using pb - */ + /** Return the filter serialized using pb */ @Override public byte[] toByteArray() { FilterProtos.FamilyFilter.Builder builder = FilterProtos.FamilyFilter.newBuilder(); @@ -84,6 +82,7 @@ public byte[] toByteArray() { } /** + * Parse a serialized representation of the filter. * @param pbBytes A pb serialized {@link FamilyFilter} instance * @return An instance of {@link FamilyFilter} made from bytes n * @see #toByteArray */ @@ -108,21 +107,24 @@ public static FamilyFilter parseFrom(final byte[] pbBytes) throws Deserializatio } /** - * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * Return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other.
*/ @Override boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof FamilyFilter)) return false; - + if (o == this) { + return true; + } + if (!(o instanceof FamilyFilter)) { + return false; + } FamilyFilter other = (FamilyFilter) o; return super.areSerializedFieldsEqual(other); } @Override public boolean equals(Object obj) { - return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj); + return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java index 2c623306ba0b..a7ba35d401f2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java @@ -207,6 +207,7 @@ public enum ReturnCode { * @param pbBytes A pb serialized {@link Filter} instance * @return An instance of {@link Filter} made from bytes n * @see #toByteArray */ + @SuppressWarnings("DoNotCallSuggester") public static Filter parseFrom(final byte[] pbBytes) throws DeserializationException { throw new DeserializationException( "parseFrom called on base Filter, but should be called on derived type"); @@ -216,7 +217,6 @@ public static Filter parseFrom(final byte[] pbBytes) throws DeserializationExcep * Concrete implementers can signal a failure condition in their code by throwing an * {@link IOException}. n * @return true if and only if the fields of the filter that are * serialized are equal to the corresponding fields in other. Used for testing. - * @throws IOException in case an I/O or an filter specific failure needs to be signaled. */ abstract boolean areSerializedFieldsEqual(Filter other); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java index ff637c7f0527..3479d81e31ae 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java @@ -71,7 +71,7 @@ public void filterRowCells(List ignored) throws IOException { } /** - * Fitlers that never filter by modifying the returned List of Cells can inherit this + * Filters that never filter by modifying the returned List of Cells can inherit this * implementation that does nothing. {@inheritDoc} */ @Override @@ -113,6 +113,7 @@ public boolean isFamilyEssential(byte[] name) throws IOException { * @param filterArguments the filter's arguments * @return constructed filter object */ + @SuppressWarnings("DoNotCallSuggester") public static Filter createFilterFromArguments(ArrayList filterArguments) { throw new IllegalArgumentException("This method has not been implemented"); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java index be3035858f13..b402e50ddaec 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java @@ -107,16 +107,12 @@ public FilterList(final Operator operator, final Filter... filters) { this(operator, Arrays.asList(filters)); } - /** - * Get the operator. n - */ + /** Return the operator. */ public Operator getOperator() { return operator; } - /** - * Get the filters. n - */ + /** Return the filters. 
*/ public List getFilters() { return filterListBase.getFilters(); } @@ -181,9 +177,7 @@ public boolean filterRow() throws IOException { return filterListBase.filterRow(); } - /** - * @return The filter serialized using pb - */ + /** Return the filter serialized using pb */ @Override public byte[] toByteArray() throws IOException { FilterProtos.FilterList.Builder builder = FilterProtos.FilterList.newBuilder(); @@ -196,8 +190,10 @@ public byte[] toByteArray() throws IOException { } /** + * Parse a serialized representation of this filter. * @param pbBytes A pb serialized {@link FilterList} instance - * @return An instance of {@link FilterList} made from bytes n * @see #toByteArray + * @return An instance of {@link FilterList} made from bytes + * @see #toByteArray */ public static FilterList parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.FilterList proto; @@ -220,14 +216,17 @@ public static FilterList parseFrom(final byte[] pbBytes) throws DeserializationE } /** - * n * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * Return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. */ @Override boolean areSerializedFieldsEqual(Filter other) { - if (other == this) return true; - if (!(other instanceof FilterList)) return false; - + if (other == this) { + return true; + } + if (!(other instanceof FilterList)) { + return false; + } FilterList o = (FilterList) other; return this.getOperator().equals(o.getOperator()) && ((this.getFilters() == o.getFilters()) || this.getFilters().equals(o.getFilters())); @@ -262,7 +261,7 @@ public String toString() { @Override public boolean equals(Object obj) { - return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj); + return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithAND.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithAND.java index 760b79d497d3..45e06f444547 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithAND.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithAND.java @@ -270,12 +270,12 @@ public Cell getNextCellHint(Cell currentCell) throws IOException { @Override public boolean equals(Object obj) { - if (!(obj instanceof FilterListWithAND)) { - return false; - } if (this == obj) { return true; } + if (!(obj instanceof FilterListWithAND)) { + return false; + } FilterListWithAND f = (FilterListWithAND) obj; return this.filters.equals(f.getFilters()) && this.seekHintFilters.equals(f.seekHintFilters); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithOR.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithOR.java index dd50a1bbb8d9..fbe68ab13527 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithOR.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithOR.java @@ -394,12 +394,12 @@ public Cell getNextCellHint(Cell currentCell) throws IOException { @Override public boolean equals(Object obj) { - if (obj == null || (!(obj instanceof FilterListWithOR))) { - return false; - } if (this == obj) { return true; } + if (!(obj instanceof FilterListWithOR)) { + return false; + } FilterListWithOR f = (FilterListWithOR) obj; return 
this.filters.equals(f.getFilters()) && this.prevFilterRCList.equals(f.prevFilterRCList) && this.prevCellList.equals(f.prevCellList); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java index 83f8409facc8..5e758aca41d0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.java @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Objects; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.yetus.audience.InterfaceAudience; @@ -65,23 +64,17 @@ public static Filter createFilterFromArguments(ArrayList filterArguments return new FirstKeyOnlyFilter(); } - /** - * @return true if first KV has been found. - */ + /** Return true if first KV has been found. */ protected boolean hasFoundKV() { return this.foundKV; } - /** - * @param value update {@link #foundKV} flag with value. - */ + /** Update {@link #foundKV} flag with value. */ protected void setFoundKV(boolean value) { this.foundKV = value; } - /** - * @return The filter serialized using pb - */ + /** Return the filter serialized using pb */ @Override public byte[] toByteArray() { FilterProtos.FirstKeyOnlyFilter.Builder builder = FilterProtos.FirstKeyOnlyFilter.newBuilder(); @@ -89,9 +82,10 @@ public byte[] toByteArray() { } /** + * Parses a serialized representation of the filter * @param pbBytes A pb serialized {@link FirstKeyOnlyFilter} instance * @return An instance of {@link FirstKeyOnlyFilter} made from bytes - * @throws org.apache.hadoop.hbase.exceptions.DeserializationException + * @throws org.apache.hadoop.hbase.exceptions.DeserializationException if an error occurred * @see #toByteArray */ public static FirstKeyOnlyFilter parseFrom(final byte[] pbBytes) throws DeserializationException { @@ -106,25 +100,27 @@ public static FirstKeyOnlyFilter parseFrom(final byte[] pbBytes) throws Deserial } /** - * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * Return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. 
*/ @Override boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof FirstKeyOnlyFilter)) return false; - + if (o == this) { + return true; + } + if (!(o instanceof FirstKeyOnlyFilter)) { + return false; + } return true; } @Override public boolean equals(Object obj) { - return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj); + return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj); } @Override public int hashCode() { - return Objects.hashCode(foundKV); + return Boolean.hashCode(foundKV); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java index cec4a2f06ff2..214b4010f8f4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java @@ -32,10 +32,13 @@ public class FirstKeyValueMatchingQualifiersFilter extends FirstKeyOnlyFilter { /** + * Parse a serialized representation of the filter. * @param pbBytes A pb serialized {@link FirstKeyValueMatchingQualifiersFilter} instance * @return An instance of {@link FirstKeyValueMatchingQualifiersFilter} made from - * bytes n * @see #toByteArray + * bytes + * @see #toByteArray */ + @SuppressWarnings("DoNotCallSuggester") public static FirstKeyValueMatchingQualifiersFilter parseFrom(final byte[] pbBytes) throws DeserializationException { throw new DeserializationException( diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java index a41763fcea30..9145011b1523 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java @@ -251,9 +251,7 @@ public boolean filterAllRemaining() { return done; } - /** - * @return The filter serialized using pb - */ + /** Return the filter serialized using pb */ @Override public byte[] toByteArray() { FilterProtos.FuzzyRowFilter.Builder builder = FilterProtos.FuzzyRowFilter.newBuilder(); @@ -267,9 +265,10 @@ public byte[] toByteArray() { } /** + * Parse a serialized representation of the filter * @param pbBytes A pb serialized {@link FuzzyRowFilter} instance - * @return An instance of {@link FuzzyRowFilter} made from bytes n * @see - * #toByteArray + * @return An instance of {@link FuzzyRowFilter} made from bytes + * @see #toByteArray */ public static FuzzyRowFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.FuzzyRowFilter proto; @@ -342,7 +341,7 @@ static SatisfiesCode satisfies(boolean reverse, byte[] row, int offset, int leng long fuzzyBytes = Bytes.toLong(fuzzyKeyBytes, i); long fuzzyMeta = Bytes.toLong(fuzzyKeyMeta, i); long rowValue = Bytes.toLong(row, offset + i); - if ((rowValue & fuzzyMeta) != (fuzzyBytes)) { + if ((rowValue & fuzzyMeta) != fuzzyBytes) { // We always return NEXT_EXISTS return SatisfiesCode.NEXT_EXISTS; } @@ -354,7 +353,7 @@ static SatisfiesCode satisfies(boolean reverse, byte[] row, int offset, int leng int fuzzyBytes = Bytes.toInt(fuzzyKeyBytes, off); int fuzzyMeta = Bytes.toInt(fuzzyKeyMeta, off); int rowValue = Bytes.toInt(row, offset + off); - if ((rowValue & fuzzyMeta) != (fuzzyBytes)) { + if ((rowValue & fuzzyMeta) != fuzzyBytes) { // We 
always return NEXT_EXISTS return SatisfiesCode.NEXT_EXISTS; } @@ -365,7 +364,7 @@ static SatisfiesCode satisfies(boolean reverse, byte[] row, int offset, int leng short fuzzyBytes = Bytes.toShort(fuzzyKeyBytes, off); short fuzzyMeta = Bytes.toShort(fuzzyKeyMeta, off); short rowValue = Bytes.toShort(row, offset + off); - if ((rowValue & fuzzyMeta) != (fuzzyBytes)) { + if ((rowValue & fuzzyMeta) != fuzzyBytes) { // We always return NEXT_EXISTS // even if it does not (in this case getNextForFuzzyRule // will return null) @@ -378,7 +377,7 @@ static SatisfiesCode satisfies(boolean reverse, byte[] row, int offset, int leng int fuzzyBytes = fuzzyKeyBytes[off] & 0xff; int fuzzyMeta = fuzzyKeyMeta[off] & 0xff; int rowValue = row[offset + off] & 0xff; - if ((rowValue & fuzzyMeta) != (fuzzyBytes)) { + if ((rowValue & fuzzyMeta) != fuzzyBytes) { // We always return NEXT_EXISTS return SatisfiesCode.NEXT_EXISTS; } @@ -519,8 +518,8 @@ public static Order orderFor(boolean reverse) { } /** - * @return greater byte array than given (row) which satisfies the fuzzy rule if it exists, null - * otherwise + * Return greater byte array than given (row) which satisfies the fuzzy rule if it exists, null + * otherwise */ static byte[] getNextForFuzzyRule(boolean reverse, byte[] row, int offset, int length, byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) { @@ -605,14 +604,17 @@ private static byte[] trimTrailingZeroes(byte[] result, byte[] fuzzyKeyMeta, int } /** - * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * Return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. */ @Override boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof FuzzyRowFilter)) return false; - + if (o == this) { + return true; + } + if (!(o instanceof FuzzyRowFilter)) { + return false; + } FuzzyRowFilter other = (FuzzyRowFilter) o; if (this.fuzzyKeysData.size() != other.fuzzyKeysData.size()) return false; for (int i = 0; i < fuzzyKeysData.size(); ++i) { @@ -630,7 +632,7 @@ boolean areSerializedFieldsEqual(Filter o) { @Override public boolean equals(Object obj) { - return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj); + return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java index 7f42fb633c33..20cc8edb2d85 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java @@ -75,9 +75,7 @@ public static Filter createFilterFromArguments(ArrayList filterArguments return new InclusiveStopFilter(stopRowKey); } - /** - * @return The filter serialized using pb - */ + /** Return the filter serialized using pb */ @Override public byte[] toByteArray() { FilterProtos.InclusiveStopFilter.Builder builder = @@ -88,9 +86,10 @@ public byte[] toByteArray() { } /** + * Parse a serialized representation of this filter. 
* @param pbBytes A pb serialized {@link InclusiveStopFilter} instance - * @return An instance of {@link InclusiveStopFilter} made from bytes n * @see - * #toByteArray + * @return An instance of {@link InclusiveStopFilter} made from bytes + * @see #toByteArray */ public static InclusiveStopFilter parseFrom(final byte[] pbBytes) throws DeserializationException { @@ -105,15 +104,17 @@ public static InclusiveStopFilter parseFrom(final byte[] pbBytes) } /** - * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * Return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. */ @Override boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof InclusiveStopFilter)) return false; - + if (o == this) { + return true; + } + if (!(o instanceof InclusiveStopFilter)) { + return false; + } InclusiveStopFilter other = (InclusiveStopFilter) o; return Bytes.equals(this.getStopRowKey(), other.getStopRowKey()); } @@ -125,7 +126,7 @@ public String toString() { @Override public boolean equals(Object obj) { - return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj); + return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/IncompatibleFilterException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/IncompatibleFilterException.java index c91543d29af1..24d5f1c9b3a0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/IncompatibleFilterException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/IncompatibleFilterException.java @@ -26,15 +26,10 @@ public class IncompatibleFilterException extends RuntimeException { private static final long serialVersionUID = 3236763276623198231L; - /** constructor */ public IncompatibleFilterException() { super(); } - /** - * constructor - * @param s message - */ public IncompatibleFilterException(String s) { super(s); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InvalidRowFilterException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InvalidRowFilterException.java index 9e48cbba1fe8..4218cccdf70b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InvalidRowFilterException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InvalidRowFilterException.java @@ -26,15 +26,10 @@ public class InvalidRowFilterException extends RuntimeException { private static final long serialVersionUID = 2667894046345657865L; - /** constructor */ public InvalidRowFilterException() { super(); } - /** - * constructor - * @param s message - */ public InvalidRowFilterException(String s) { super(s); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java index 6aa410730d8f..782e89fc5232 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java @@ -93,9 +93,7 @@ public static Filter createFilterFromArguments(ArrayList filterArguments return filter; } - /** - * @return The filter serialized using pb - */ + /** Return the filter serialized using pb */ @Override public byte[] toByteArray() { 
FilterProtos.KeyOnlyFilter.Builder builder = FilterProtos.KeyOnlyFilter.newBuilder(); @@ -104,8 +102,10 @@ public byte[] toByteArray() { } /** + * Parse a serialized representation of this filter. * @param pbBytes A pb serialized {@link KeyOnlyFilter} instance - * @return An instance of {@link KeyOnlyFilter} made from bytes n * @see #toByteArray + * @return An instance of {@link KeyOnlyFilter} made from bytes + * @see #toByteArray */ public static KeyOnlyFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.KeyOnlyFilter proto; @@ -118,22 +118,24 @@ public static KeyOnlyFilter parseFrom(final byte[] pbBytes) throws Deserializati } /** - * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * Return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. */ @Override boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof KeyOnlyFilter)) return false; - + if (o == this) { + return true; + } + if (!(o instanceof KeyOnlyFilter)) { + return false; + } KeyOnlyFilter other = (KeyOnlyFilter) o; return this.lenAsVal == other.lenAsVal; } @Override public boolean equals(Object obj) { - return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj); + return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java index ead0ee104470..e0c8a3518b64 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java @@ -53,9 +53,7 @@ public int compareTo(ByteBuffer value, int offset, int length) { return Long.compare(longValue, that); } - /** - * @return The comparator serialized using pb - */ + /** Return the comparator serialized using pb */ @Override public byte[] toByteArray() { ComparatorProtos.LongComparator.Builder builder = ComparatorProtos.LongComparator.newBuilder(); @@ -64,9 +62,9 @@ public byte[] toByteArray() { } /** + * Parse the serialized representation of this comparator. * @param pbBytes A pb serialized {@link LongComparator} instance * @return An instance of {@link LongComparator} made from bytes - * @throws org.apache.hadoop.hbase.exceptions.DeserializationException * @see #toByteArray */ public static LongComparator parseFrom(final byte[] pbBytes) throws DeserializationException { @@ -80,8 +78,8 @@ public static LongComparator parseFrom(final byte[] pbBytes) throws Deserializat } /** - * n * @return true if and only if the fields of the comparator that are serialized are equal to - * the corresponding fields in other. Used for testing. + * Return true if and only if the fields of the comparator that are serialized are equal to the + * corresponding fields in other. 
*/ boolean areSerializedFieldsEqual(LongComparator other) { if (other == this) return true; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java index 1fc22cfcdf78..aec20e4cbe5d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java @@ -62,9 +62,6 @@ public class MultiRowRangeFilter extends FilterBase { private BasicRowRange range; private ReturnCode currentReturnCode; - /** - * @param list A list of RowRange - */ public MultiRowRangeFilter(List list) { // We don't use rangeList anywhere else, but keeping it lets us pay a little // memory to avoid touching the serialization logic. @@ -199,9 +196,10 @@ public byte[] toByteArray() { } /** + * Parse the serialized representation of this filter. * @param pbBytes A pb serialized instance * @return An instance of MultiRowRangeFilter - * @throws org.apache.hadoop.hbase.exceptions.DeserializationException + * @throws org.apache.hadoop.hbase.exceptions.DeserializationException if an error occurred */ public static MultiRowRangeFilter parseFrom(final byte[] pbBytes) throws DeserializationException { @@ -226,9 +224,8 @@ public static MultiRowRangeFilter parseFrom(final byte[] pbBytes) } /** - * @param o the filter to compare - * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * Return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. */ @Override boolean areSerializedFieldsEqual(Filter o) { @@ -469,16 +466,12 @@ public byte[] getStopRow() { return stopRow; } - /** - * @return if start row is inclusive. - */ + /** Return if start row is inclusive. */ public boolean isStartRowInclusive() { return startRowInclusive; } - /** - * @return if stop row is inclusive. - */ + /** Return if stop row is inclusive. */ public boolean isStopRowInclusive() { return stopRowInclusive; } @@ -520,12 +513,12 @@ public boolean isValid() { @Override public boolean equals(Object obj) { - if (!(obj instanceof BasicRowRange)) { - return false; - } if (this == obj) { return true; } + if (!(obj instanceof BasicRowRange)) { + return false; + } BasicRowRange rr = (BasicRowRange) obj; return Bytes.equals(this.stopRow, rr.getStopRow()) && Bytes.equals(this.startRow, this.getStartRow()) @@ -722,45 +715,33 @@ public void setFoundFirstRange() { this.foundFirstRange = true; } - /** - * Gets the RowRange at the given offset. - */ - @SuppressWarnings("unchecked") + /** Gets the RowRange at the given offset. */ + @SuppressWarnings({ "unchecked", "TypeParameterUnusedInFormals" }) public T get(int i) { return (T) ranges.get(i); } - /** - * Returns true if the first matching row range was found. - */ + /** Returns true if the first matching row range was found. */ public boolean hasFoundFirstRange() { return foundFirstRange; } - /** - * Returns true if the current range's key is exclusive - */ + /** Returns true if the current range's key is exclusive */ public boolean isExclusive() { return exclusive; } - /** - * Resets the exclusive flag. - */ + /** Resets the exclusive flag. */ public void resetExclusive() { exclusive = false; } - /** - * Returns true if this class has been initialized by calling {@link #initialize(boolean)}. 
- */ + /** Returns true if this class has been initialized by calling {@link #initialize(boolean)}. */ public boolean isInitialized() { return initialized; } - /** - * Returns true if we exhausted searching all row ranges. - */ + /** Returns true if we exhausted searching all row ranges. */ public boolean isIterationComplete(int index) { return index >= ranges.size(); } @@ -768,7 +749,7 @@ public boolean isIterationComplete(int index) { @Override public boolean equals(Object obj) { - return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj); + return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java index 0d75d16bb3b8..66fe862f9552 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java @@ -116,9 +116,7 @@ public static Filter createFilterFromArguments(ArrayList filterArguments return new MultipleColumnPrefixFilter(prefixes); } - /** - * @return The filter serialized using pb - */ + /** Return the filter serialized using pb. */ @Override public byte[] toByteArray() { FilterProtos.MultipleColumnPrefixFilter.Builder builder = @@ -130,9 +128,10 @@ public byte[] toByteArray() { } /** + * Parse a serialized representation of this filter. * @param pbBytes A pb serialized {@link MultipleColumnPrefixFilter} instance - * @return An instance of {@link MultipleColumnPrefixFilter} made from bytes n * @see - * #toByteArray + * @return An instance of {@link MultipleColumnPrefixFilter} made from bytes + * @see #toByteArray */ public static MultipleColumnPrefixFilter parseFrom(final byte[] pbBytes) throws DeserializationException { @@ -152,9 +151,8 @@ public static MultipleColumnPrefixFilter parseFrom(final byte[] pbBytes) } /** - * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * Return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. */ @Override boolean areSerializedFieldsEqual(Filter o) { @@ -209,7 +207,7 @@ protected String toString(int maxPrefixes) { @Override public boolean equals(Object obj) { - return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj); + return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java index fc0562ecb3e9..7fc7b991b546 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java @@ -63,9 +63,7 @@ public int compareTo(ByteBuffer value, int offset, int length) { return value != null ? 1 : 0; } - /** - * @return The comparator serialized using pb - */ + /** Return the comparator serialized using pb */ @Override public byte[] toByteArray() { ComparatorProtos.NullComparator.Builder builder = ComparatorProtos.NullComparator.newBuilder(); @@ -73,6 +71,7 @@ public byte[] toByteArray() { } /** + * Parse a serialized representation of the comparator.
* @param pbBytes A pb serialized {@link NullComparator} instance * @return An instance of {@link NullComparator} made from bytes n * @see * #toByteArray @@ -88,8 +87,8 @@ public static NullComparator parseFrom(final byte[] pbBytes) throws Deserializat } /** - * n * @return true if and only if the fields of the comparator that are serialized are equal to - * the corresponding fields in other. Used for testing. + * Return true if and only if the fields of the comparator that are serialized are equal to the + * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(ByteArrayComparable other) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java index 445adf2129e9..9df42f9faebc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java @@ -91,9 +91,7 @@ public static Filter createFilterFromArguments(ArrayList filterArguments return new PageFilter(pageSize); } - /** - * @return The filter serialized using pb - */ + /** Return the filter serialized using pb */ @Override public byte[] toByteArray() { FilterProtos.PageFilter.Builder builder = FilterProtos.PageFilter.newBuilder(); @@ -102,8 +100,10 @@ public byte[] toByteArray() { } /** + * Parse a serialized representation of the filter. * @param pbBytes A pb serialized {@link PageFilter} instance - * @return An instance of {@link PageFilter} made from bytes n * @see #toByteArray + * @return An instance of {@link PageFilter} made from bytes + * @see #toByteArray */ public static PageFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.PageFilter proto; @@ -116,9 +116,8 @@ public static PageFilter parseFrom(final byte[] pbBytes) throws DeserializationE } /** - * @param o other Filter to compare with - * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * Return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. 
*/ @Override boolean areSerializedFieldsEqual(Filter o) { @@ -128,7 +127,6 @@ boolean areSerializedFieldsEqual(Filter o) { if (!(o instanceof PageFilter)) { return false; } - PageFilter other = (PageFilter) o; return this.getPageSize() == other.getPageSize(); } @@ -140,7 +138,7 @@ public String toString() { @Override public boolean equals(Object obj) { - return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj); + return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java index b08ce971c213..af672e93d934 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java @@ -109,6 +109,7 @@ public Filter parseFilterString(String filterString) throws CharacterCodingExcep * @param filterStringAsByteArray filter string given by the user * @return filter object we constructed */ + @SuppressWarnings("JdkObsolete") public Filter parseFilterString(byte[] filterStringAsByteArray) throws CharacterCodingException { // stack for the operators and parenthesis Stack operatorStack = new Stack<>(); @@ -156,7 +157,7 @@ public Filter parseFilterString(byte[] filterStringAsByteArray) throws Character operatorStack.pop(); continue; } - while (!(argumentOnTopOfStack.equals(ParseConstants.LPAREN_BUFFER))) { + while (!argumentOnTopOfStack.equals(ParseConstants.LPAREN_BUFFER)) { filterStack.push(popArguments(operatorStack, filterStack)); if (operatorStack.empty()) { throw new IllegalArgumentException("Mismatched parenthesis"); @@ -364,10 +365,11 @@ public static ArrayList getFilterArguments(byte[] filterStringAsByteArra * @param filterStack the stack containing the filters * @param operator the operator found while parsing the filterString */ + @SuppressWarnings("JdkObsolete") public void reduce(Stack operatorStack, Stack filterStack, ByteBuffer operator) { while ( - !operatorStack.empty() && !(ParseConstants.LPAREN_BUFFER.equals(operatorStack.peek())) + !operatorStack.empty() && !ParseConstants.LPAREN_BUFFER.equals(operatorStack.peek()) && hasHigherPriority(operatorStack.peek(), operator) ) { filterStack.push(popArguments(operatorStack, filterStack)); @@ -382,6 +384,7 @@ && hasHigherPriority(operatorStack.peek(), operator) * @param filterStack the stack containing the filters * @return the evaluated filter */ + @SuppressWarnings("JdkObsolete") public static Filter popArguments(Stack operatorStack, Stack filterStack) { ByteBuffer argumentOnTopOfStack = operatorStack.peek(); @@ -841,9 +844,7 @@ public static byte[][] parseComparator(byte[] comparator) { return result; } - /** - * Return a Set of filters supported by the Filter Language - */ + /** Return a Set of filters supported by the Filter Language */ public Set getSupportedFilters() { return filterHashMap.keySet(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java index 3b40388a06f9..a28c9cf4cfe3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java @@ -101,9 +101,7 @@ public static Filter createFilterFromArguments(ArrayList filterArguments return new PrefixFilter(prefix); } - /** - * @return The filter serialized using pb - 
*/ + /** Return the filter serialized using pb */ @Override public byte[] toByteArray() { FilterProtos.PrefixFilter.Builder builder = FilterProtos.PrefixFilter.newBuilder(); @@ -112,9 +110,10 @@ public byte[] toByteArray() { } /** + * Parse a serialized representation of the filter. * @param pbBytes A pb serialized {@link PrefixFilter} instance * @return An instance of {@link PrefixFilter} made from bytes - * @throws org.apache.hadoop.hbase.exceptions.DeserializationException + * @throws org.apache.hadoop.hbase.exceptions.DeserializationException if an error occurred * @see #toByteArray */ public static PrefixFilter parseFrom(final byte[] pbBytes) throws Deserializatio } /** - * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * Return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. */ @Override boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof PrefixFilter)) return false; - + if (o == this) { + return true; + } + if (!(o instanceof PrefixFilter)) { + return false; + } PrefixFilter other = (PrefixFilter) o; return Bytes.equals(this.getPrefix(), other.getPrefix()); } @@ -148,7 +149,7 @@ public String toString() { @Override public boolean equals(Object obj) { - return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj); + return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java index 2d52b300ff87..98a56f9f109a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java @@ -67,9 +67,7 @@ public static Filter createFilterFromArguments(ArrayList filterArguments return new QualifierFilter(compareOp, comparator); } - /** - * @return The filter serialized using pb - */ + /** Return the filter serialized using pb */ @Override public byte[] toByteArray() { FilterProtos.QualifierFilter.Builder builder = FilterProtos.QualifierFilter.newBuilder(); @@ -78,9 +76,10 @@ public byte[] toByteArray() { } /** + * Parse a serialized representation of the filter. * @param pbBytes A pb serialized {@link QualifierFilter} instance * @return An instance of {@link QualifierFilter} made from bytes - * @throws org.apache.hadoop.hbase.exceptions.DeserializationException + * @throws org.apache.hadoop.hbase.exceptions.DeserializationException if an error occurred * @see #toByteArray */ public static QualifierFilter parseFrom(final byte[] pbBytes) throws Deserializa } /** - * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * Return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other.
*/ @Override boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof QualifierFilter)) return false; - + if (o == this) { + return true; + } + if (!(o instanceof QualifierFilter)) { + return false; + } return super.areSerializedFieldsEqual(o); } @Override public boolean equals(Object obj) { - return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj); + return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java index d54d7575f254..975e2abf173b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java @@ -36,23 +36,17 @@ public class RandomRowFilter extends FilterBase { protected float chance; protected boolean filterOutRow; - /** - * Create a new filter with a specified chance for a row to be included. n - */ + /** Create a new filter with a specified chance for a row to be included. */ public RandomRowFilter(float chance) { this.chance = chance; } - /** - * @return The chance that a row gets included. - */ + /** Return the chance that a row gets included. */ public float getChance() { return chance; } - /** - * Set the chance that a row is included. n - */ + /** Set the chance that a row is included. */ public void setChance(float chance) { this.chance = chance; } @@ -100,9 +94,7 @@ public void reset() { filterOutRow = false; } - /** - * @return The filter serialized using pb - */ + /** Return the filter serialized using pb */ @Override public byte[] toByteArray() { FilterProtos.RandomRowFilter.Builder builder = FilterProtos.RandomRowFilter.newBuilder(); @@ -111,9 +103,10 @@ public byte[] toByteArray() { } /** + * Parse a serialized filter instance. * @param pbBytes A pb serialized {@link RandomRowFilter} instance - * @return An instance of {@link RandomRowFilter} made from bytes n * @see - * #toByteArray + * @return An instance of {@link RandomRowFilter} made from bytes + * @see #toByteArray */ public static RandomRowFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.RandomRowFilter proto; @@ -126,22 +119,24 @@ public static RandomRowFilter parseFrom(final byte[] pbBytes) throws Deserializa } /** - * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * Return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. 
*/ @Override boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof RandomRowFilter)) return false; - + if (o == this) { + return true; + } + if (!(o instanceof RandomRowFilter)) { + return false; + } RandomRowFilter other = (RandomRowFilter) o; return this.getChance() == other.getChance(); } @Override public boolean equals(Object obj) { - return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj); + return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java index 75272c5f2413..66479d012ab6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java @@ -46,7 +46,6 @@ * Only EQUAL or NOT_EQUAL comparisons are valid with this comparator. *

* For example: - *

* *

  * ValueFilter vf = new ValueFilter(CompareOp.EQUAL, new RegexStringComparator(
@@ -57,9 +56,8 @@
  *     "((([\\dA-Fa-f]{1,4}:){7}[\\dA-Fa-f]{1,4})(:([\\d]{1,3}.)"
  *     + "{3}[\\d]{1,3})?)(\\/[0-9]+)?"));
  * </pre>
- * <p>
+ *
  * Supports {@link java.util.regex.Pattern} flags as well:
- * <p>
  *
  * <pre>
  * ValueFilter vf = new ValueFilter(CompareOp.EQUAL,
@@ -145,15 +143,14 @@ public int compareTo(byte[] value, int offset, int length) {
     return engine.compareTo(value, offset, length);
   }
 
-  /**
-   * @return The comparator serialized using pb
-   */
+  /** Return the comparator serialized using pb */
   @Override
   public byte[] toByteArray() {
     return engine.toByteArray();
   }
 
   /**
+   * Parse a serialized representation of the comparator
    * @param pbBytes A pb serialized {@link RegexStringComparator} instance
    * @return An instance of {@link RegexStringComparator} made from bytes n * @see
    *         #toByteArray
@@ -185,13 +182,17 @@ public static RegexStringComparator parseFrom(final byte[] pbBytes)
   }
 
   /**
-   * n * @return true if and only if the fields of the comparator that are serialized are equal to
-   * the corresponding fields in other. Used for testing.
+   * Return true if and only if the fields of the comparator that are serialized are equal to the
+   * corresponding fields in other.
    */
   @Override
   boolean areSerializedFieldsEqual(ByteArrayComparable other) {
-    if (other == this) return true;
-    if (!(other instanceof RegexStringComparator)) return false;
+    if (other == this) {
+      return true;
+    }
+    if (!(other instanceof RegexStringComparator)) {
+      return false;
+    }
     RegexStringComparator comparator = (RegexStringComparator) other;
     return super.areSerializedFieldsEqual(comparator)
       && engine.getClass().isInstance(comparator.getEngine())
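Not part of the patch: the RegexStringComparator hunks above only adjust javadoc and brace style, so here is a minimal usage sketch of the comparator the javadoc describes, including a java.util.regex.Pattern flag. The class name RegexValueScanExample and the regex are invented; Scan, ValueFilter and CompareOperator are the standard hbase-client API.

import java.util.regex.Pattern;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.RegexStringComparator;
import org.apache.hadoop.hbase.filter.ValueFilter;

public class RegexValueScanExample {
  public static Scan buildScan() {
    // Case-insensitive regex match on cell values, per the "Supports Pattern flags" javadoc note.
    RegexStringComparator comparator =
      new RegexStringComparator("error|warn", Pattern.CASE_INSENSITIVE);
    // Keep only cells whose value matches the regex; other cells are filtered out.
    return new Scan().setFilter(new ValueFilter(CompareOperator.EQUAL, comparator));
  }
}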
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java
index dee91657f745..c28aa536fe77 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java
@@ -87,9 +87,7 @@ public static Filter createFilterFromArguments(ArrayList filterArguments
     return new RowFilter(compareOp, comparator);
   }
 
-  /**
-   * @return The filter serialized using pb
-   */
+  /** Return the filter serialized using pb */
   @Override
   public byte[] toByteArray() {
     FilterProtos.RowFilter.Builder builder = FilterProtos.RowFilter.newBuilder();
@@ -98,8 +96,10 @@ public byte[] toByteArray() {
   }
 
   /**
+   * Parse a serialized representation of the filter
    * @param pbBytes A pb serialized {@link RowFilter} instance
-   * @return An instance of {@link RowFilter} made from bytes n * @see #toByteArray
+   * @return An instance of {@link RowFilter} made from bytes
+   * @see #toByteArray
    */
   public static RowFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
     FilterProtos.RowFilter proto;
@@ -122,20 +122,23 @@ public static RowFilter parseFrom(final byte[] pbBytes) throws DeserializationEx
   }
 
   /**
-   * @return true if and only if the fields of the filter that are serialized are equal to the
-   *         corresponding fields in other. Used for testing.
+   * Return true if and only if the fields of the filter that are serialized are equal to the
+   * corresponding fields in other.
    */
   @Override
   boolean areSerializedFieldsEqual(Filter o) {
-    if (o == this) return true;
-    if (!(o instanceof RowFilter)) return false;
-
+    if (o == this) {
+      return true;
+    }
+    if (!(o instanceof RowFilter)) {
+      return false;
+    }
     return super.areSerializedFieldsEqual(o);
   }
 
   @Override
   public boolean equals(Object obj) {
-    return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj);
+    return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj);
   }
 
   @Override
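Illustrative sketch only, not part of this change: the toByteArray()/parseFrom() round trip that the reworked javadoc above points at via @see #toByteArray, shown for RowFilter. The class name RowFilterRoundTrip and the row key are made up; the filter calls appear in the hunks above.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class RowFilterRoundTrip {
  public static void main(String[] args) throws DeserializationException {
    RowFilter original =
      new RowFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("row-0001")));
    // Serialize to the protobuf wire format and parse it back.
    byte[] wire = original.toByteArray();
    RowFilter copy = RowFilter.parseFrom(wire);
    // equals() delegates to the package-private areSerializedFieldsEqual() seen in the hunk above.
    System.out.println(original.equals(copy)); // expected: true
  }
}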
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
index f1b9413718f0..91b0a17e002f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
@@ -112,9 +112,7 @@ public static Filter createFilterFromArguments(ArrayList filterArguments
     return filter;
   }
 
-  /**
-   * @return The filter serialized using pb
-   */
+  /** Return the filter serialized using pb */
   @Override
   public byte[] toByteArray() {
     FilterProtos.SingleColumnValueExcludeFilter.Builder builder =
@@ -124,9 +122,10 @@ public byte[] toByteArray() {
   }
 
   /**
+   * Parse a serialized representation of the filter
    * @param pbBytes A pb serialized {@link SingleColumnValueExcludeFilter} instance
-   * @return An instance of {@link SingleColumnValueExcludeFilter} made from bytes n
-   *         * @see #toByteArray
+   * @return An instance of {@link SingleColumnValueExcludeFilter} made from bytes
+   * @see #toByteArray
    */
   public static SingleColumnValueExcludeFilter parseFrom(final byte[] pbBytes)
     throws DeserializationException {
@@ -153,20 +152,23 @@ public static SingleColumnValueExcludeFilter parseFrom(final byte[] pbBytes)
   }
 
   /**
-   * @return true if and only if the fields of the filter that are serialized are equal to the
-   *         corresponding fields in other. Used for testing.
+   * Return true if and only if the fields of the filter that are serialized are equal to the
+   * corresponding fields in other.
    */
   @Override
   boolean areSerializedFieldsEqual(Filter o) {
-    if (o == this) return true;
-    if (!(o instanceof SingleColumnValueExcludeFilter)) return false;
-
+    if (o == this) {
+      return true;
+    }
+    if (!(o instanceof SingleColumnValueExcludeFilter)) {
+      return false;
+    }
     return super.areSerializedFieldsEqual(o);
   }
 
   @Override
   public boolean equals(Object obj) {
-    return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj);
+    return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj);
   }
 
   @Override
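A hypothetical usage sketch (not included in the patch) for the filter whose serialization javadoc is tightened above: SingleColumnValueExcludeFilter matches rows the same way SingleColumnValueFilter does, but omits the tested column from the returned row. Family, qualifier and value below are invented.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ExcludeFilterScanExample {
  public static Scan buildScan() {
    byte[] family = Bytes.toBytes("d");         // assumed column family
    byte[] qualifier = Bytes.toBytes("status"); // assumed qualifier
    SingleColumnValueExcludeFilter filter = new SingleColumnValueExcludeFilter(
      family, qualifier, CompareOperator.EQUAL, Bytes.toBytes("ACTIVE"));
    // Matching rows are returned, but the d:status cell itself is excluded from the result.
    return new Scan().setFilter(filter);
  }
}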
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
index 365ee06b904b..38a7a5991783 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
@@ -125,23 +125,17 @@ public CompareOperator getCompareOperator() {
     return op;
   }
 
-  /**
-   * @return the comparator
-   */
+  /** Return the comparator */
   public org.apache.hadoop.hbase.filter.ByteArrayComparable getComparator() {
     return comparator;
   }
 
-  /**
-   * @return the family
-   */
+  /** Return the family */
   public byte[] getFamily() {
     return columnFamily;
   }
 
-  /**
-   * @return the qualifier
-   */
+  /** Return the qualifier */
   public byte[] getQualifier() {
     return columnQualifier;
   }
@@ -283,15 +277,14 @@ FilterProtos.SingleColumnValueFilter convert() {
     return builder.build();
   }
 
-  /**
-   * @return The filter serialized using pb
-   */
+  /** Return the filter serialized using pb */
   @Override
   public byte[] toByteArray() {
     return convert().toByteArray();
   }
 
   /**
+   * Parse a serialized representation of this filter.
    * @param pbBytes A pb serialized {@link SingleColumnValueFilter} instance
    * @return An instance of {@link SingleColumnValueFilter} made from bytes
    * @see #toByteArray
@@ -320,14 +313,17 @@ public static SingleColumnValueFilter parseFrom(final byte[] pbBytes)
   }
 
   /**
-   * @return true if and only if the fields of the filter that are serialized are equal to the
-   *         corresponding fields in other. Used for testing.
+   * Return true if and only if the fields of the filter that are serialized are equal to the
+   * corresponding fields in other.
    */
   @Override
   boolean areSerializedFieldsEqual(Filter o) {
-    if (o == this) return true;
-    if (!(o instanceof SingleColumnValueFilter)) return false;
-
+    if (o == this) {
+      return true;
+    }
+    if (!(o instanceof SingleColumnValueFilter)) {
+      return false;
+    }
     SingleColumnValueFilter other = (SingleColumnValueFilter) o;
     return Bytes.equals(this.getFamily(), other.getFamily())
       && Bytes.equals(this.getQualifier(), other.getQualifier()) && this.op.equals(other.op)
@@ -355,7 +351,7 @@ public String toString() {
 
   @Override
   public boolean equals(Object obj) {
-    return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj);
+    return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj);
   }
 
   @Override
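Sketch for review context, not part of the patch: exercising the getters whose javadoc the hunks above condense, on a SingleColumnValueFilter. The class name ScvfGetterExample and the column names/values are illustrative only.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ScvfGetterExample {
  public static void main(String[] args) {
    SingleColumnValueFilter filter = new SingleColumnValueFilter(
      Bytes.toBytes("d"), Bytes.toBytes("team"), CompareOperator.EQUAL, Bytes.toBytes("infra"));
    // Skip rows that do not contain the d:team column at all.
    filter.setFilterIfMissing(true);
    System.out.println(Bytes.toString(filter.getFamily()));    // d
    System.out.println(Bytes.toString(filter.getQualifier())); // team
    System.out.println(filter.getCompareOperator());           // EQUAL
  }
}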
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java
index 3aa0ef22c151..36555e0f1162 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java
@@ -35,14 +35,15 @@
  * the actual weights, and we want to filter out the entire row if any of its weights are zero. In
  * this case, we want to prevent rows from being emitted if a single key is filtered. Combine this
  * filter with a {@link ValueFilter}:
- * <p>
- *
- * <pre>
+ *
+ * <pre>
  * scan.setFilter(new SkipFilter(new ValueFilter(CompareOp.NOT_EQUAL,
  *     new BinaryComparator(Bytes.toBytes(0))));
- * </pre> Any row which contained a column whose value was 0 will be filtered out (since
- * ValueFilter will not pass that Cell). Without this filter, the other non-zero valued columns in
- * the row would still be emitted.
+ * </pre>
+ *
+ * Any row which contained a column whose value was 0 will be filtered out (since ValueFilter will
+ * not pass that Cell). Without this filter, the other non-zero valued columns in the row would
+ * still be emitted.
  * <p>
*/ @InterfaceAudience.Public @@ -96,9 +97,7 @@ public boolean hasFilterRow() { return true; } - /** - * @return The filter serialized using pb - */ + /** Return the filter serialized using pb */ @Override public byte[] toByteArray() throws IOException { FilterProtos.SkipFilter.Builder builder = FilterProtos.SkipFilter.newBuilder(); @@ -107,8 +106,10 @@ public byte[] toByteArray() throws IOException { } /** + * Parse a serialized representation of the filter. * @param pbBytes A pb serialized {@link SkipFilter} instance - * @return An instance of {@link SkipFilter} made from bytes n * @see #toByteArray + * @return An instance of {@link SkipFilter} made from bytes + * @see #toByteArray */ public static SkipFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.SkipFilter proto; @@ -125,15 +126,17 @@ public static SkipFilter parseFrom(final byte[] pbBytes) throws DeserializationE } /** - * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * Return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. */ @Override boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof SkipFilter)) return false; - + if (o == this) { + return true; + } + if (!(o instanceof SkipFilter)) { + return false; + } SkipFilter other = (SkipFilter) o; return getFilter().areSerializedFieldsEqual(other.getFilter()); } @@ -150,7 +153,7 @@ public String toString() { @Override public boolean equals(Object obj) { - return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj); + return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java index b8e33c438feb..3ba56be0b059 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java @@ -47,10 +47,6 @@ public class SubstringComparator extends ByteArrayComparable { private String substr; - /** - * Constructor - * @param substr the substring - */ public SubstringComparator(String substr) { super(Bytes.toBytes(substr.toLowerCase(Locale.ROOT))); this.substr = substr.toLowerCase(Locale.ROOT); @@ -66,9 +62,7 @@ public int compareTo(byte[] value, int offset, int length) { return Bytes.toString(value, offset, length).toLowerCase(Locale.ROOT).contains(substr) ? 0 : 1; } - /** - * @return The comparator serialized using pb - */ + /** Return the comparator serialized using pb */ @Override public byte[] toByteArray() { ComparatorProtos.SubstringComparator.Builder builder = @@ -78,6 +72,7 @@ public byte[] toByteArray() { } /** + * Parse a serialized representation of the comparator. * @param pbBytes A pb serialized {@link SubstringComparator} instance * @return An instance of {@link SubstringComparator} made from bytes n * @see * #toByteArray @@ -94,14 +89,17 @@ public static SubstringComparator parseFrom(final byte[] pbBytes) } /** - * n * @return true if and only if the fields of the comparator that are serialized are equal to - * the corresponding fields in other. Used for testing. 
+ * Return true if and only if the fields of the comparator that are serialized are equal to the + * corresponding fields in other. */ @Override boolean areSerializedFieldsEqual(ByteArrayComparable other) { - if (other == this) return true; - if (!(other instanceof SubstringComparator)) return false; - + if (other == this) { + return true; + } + if (!(other instanceof SubstringComparator)) { + return false; + } SubstringComparator comparator = (SubstringComparator) other; return super.areSerializedFieldsEqual(comparator) && this.substr.equals(comparator.substr); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java index dfd2f5c537c9..05f825fcf127 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java @@ -77,9 +77,7 @@ public TimestampsFilter(List timestamps, boolean canHint) { init(); } - /** - * @return the list of timestamps - */ + /** Return the list of timestamps */ public List getTimestamps() { List list = new ArrayList<>(timestamps.size()); list.addAll(timestamps); @@ -171,6 +169,7 @@ public byte[] toByteArray() { } /** + * Parse a serialized representation of the filter * @param pbBytes A pb serialized {@link TimestampsFilter} instance * @return An instance of {@link TimestampsFilter} made from bytes * @see #toByteArray @@ -187,15 +186,17 @@ public static TimestampsFilter parseFrom(final byte[] pbBytes) throws Deserializ } /** - * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * Return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. 
*/ @Override boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof TimestampsFilter)) return false; - + if (o == this) { + return true; + } + if (!(o instanceof TimestampsFilter)) { + return false; + } TimestampsFilter other = (TimestampsFilter) o; return this.getTimestamps().equals(other.getTimestamps()); } @@ -226,7 +227,7 @@ protected String toString(int maxTimestamps) { @Override public boolean equals(Object obj) { - return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj); + return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java index 810f71efbd67..dcbcf89b1c54 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java @@ -69,9 +69,7 @@ public static Filter createFilterFromArguments(ArrayList filterArguments return new ValueFilter(compareOp, comparator); } - /** - * @return The filter serialized using pb - */ + /** Return the filter serialized using pb */ @Override public byte[] toByteArray() { FilterProtos.ValueFilter.Builder builder = FilterProtos.ValueFilter.newBuilder(); @@ -80,8 +78,10 @@ public byte[] toByteArray() { } /** + * Parse a serialized representation of the filter * @param pbBytes A pb serialized {@link ValueFilter} instance - * @return An instance of {@link ValueFilter} made from bytes n * @see #toByteArray + * @return An instance of {@link ValueFilter} made from bytes + * @see #toByteArray */ public static ValueFilter parseFrom(final byte[] pbBytes) throws DeserializationException { FilterProtos.ValueFilter proto; @@ -104,20 +104,23 @@ public static ValueFilter parseFrom(final byte[] pbBytes) throws Deserialization } /** - * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * Return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. 
*/ @Override boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof ValueFilter)) return false; - + if (o == this) { + return true; + } + if (!(o instanceof ValueFilter)) { + return false; + } return super.areSerializedFieldsEqual(o); } @Override public boolean equals(Object obj) { - return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj); + return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java index 94cdd9794b3c..31bab3d19004 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java @@ -94,9 +94,7 @@ public boolean hasFilterRow() { return true; } - /** - * @return The filter serialized using pb - */ + /** Return the filter serialized using pb */ @Override public byte[] toByteArray() throws IOException { FilterProtos.WhileMatchFilter.Builder builder = FilterProtos.WhileMatchFilter.newBuilder(); @@ -105,9 +103,10 @@ public byte[] toByteArray() throws IOException { } /** + * Parse a serialized representation of the filter * @param pbBytes A pb serialized {@link WhileMatchFilter} instance * @return An instance of {@link WhileMatchFilter} made from bytes - * @throws org.apache.hadoop.hbase.exceptions.DeserializationException + * @throws org.apache.hadoop.hbase.exceptions.DeserializationException if an error occurred * @see #toByteArray */ public static WhileMatchFilter parseFrom(final byte[] pbBytes) throws DeserializationException { @@ -125,15 +124,17 @@ public static WhileMatchFilter parseFrom(final byte[] pbBytes) throws Deserializ } /** - * @param o the other filter to compare with - * @return true if and only if the fields of the filter that are serialized are equal to the - * corresponding fields in other. Used for testing. + * Return true if and only if the fields of the filter that are serialized are equal to the + * corresponding fields in other. 
*/ @Override boolean areSerializedFieldsEqual(Filter o) { - if (o == this) return true; - if (!(o instanceof WhileMatchFilter)) return false; - + if (o == this) { + return true; + } + if (!(o instanceof WhileMatchFilter)) { + return false; + } WhileMatchFilter other = (WhileMatchFilter) o; return getFilter().areSerializedFieldsEqual(other.getFilter()); } @@ -150,7 +151,7 @@ public String toString() { @Override public boolean equals(Object obj) { - return obj instanceof Filter && areSerializedFieldsEqual((Filter) obj); + return (obj instanceof Filter) && areSerializedFieldsEqual((Filter) obj); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java index 7c0149ccb8a3..9d557a98743c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java @@ -238,7 +238,8 @@ Codec getCodec() { return null; } try { - return (Codec) Class.forName(className).getDeclaredConstructor().newInstance(); + return Class.forName(className).asSubclass(Codec.class).getDeclaredConstructor() + .newInstance(); } catch (Exception e) { throw new RuntimeException("Failed getting codec " + className, e); } @@ -265,7 +266,8 @@ private static CompressionCodec getCompressor(final Configuration conf) { return null; } try { - return (CompressionCodec) Class.forName(className).getDeclaredConstructor().newInstance(); + return Class.forName(className).asSubclass(CompressionCodec.class).getDeclaredConstructor() + .newInstance(); } catch (Exception e) { throw new RuntimeException("Failed getting compressor " + className, e); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java index c8adc6a8cc3c..da032cbeb9d1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java @@ -67,7 +67,6 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.protobuf.Message; -import org.apache.hbase.thirdparty.com.google.protobuf.Message.Builder; import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator; @@ -181,6 +180,8 @@ public void run() { try { BlockingRpcConnection.this.wait(); } catch (InterruptedException e) { + // Restore interrupt status + Thread.currentThread().interrupt(); } // check if we need to quit, so continue the main loop instead of fallback. 
continue; @@ -333,6 +334,8 @@ private synchronized boolean waitForWork() { try { wait(Math.min(this.rpcClient.minIdleTimeBeforeClose, 1000)); } catch (InterruptedException e) { + // Restore interrupt status + Thread.currentThread().interrupt(); } } } @@ -685,7 +688,7 @@ private void readResponse() { } else { Message value = null; if (call.responseDefaultType != null) { - Builder builder = call.responseDefaultType.newBuilderForType(); + Message.Builder builder = call.responseDefaultType.newBuilderForType(); ProtobufUtil.mergeDelimitedFrom(builder, in); value = builder.build(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BufferCallBeforeInitHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BufferCallBeforeInitHandler.java index 3dc48ce3e00e..ec7f422e64d9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BufferCallBeforeInitHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BufferCallBeforeInitHandler.java @@ -65,6 +65,7 @@ public static BufferCallBeforeInitHandler.BufferCallEvent fail(IOException error private final Map id2Call = new HashMap<>(); @Override + @SuppressWarnings("FutureReturnValueIgnored") public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) { if (msg instanceof Call) { Call call = (Call) msg; @@ -78,7 +79,8 @@ public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) } @Override - public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception { + @SuppressWarnings("FutureReturnValueIgnored") + public void userEventTriggered(ChannelHandlerContext ctx, Object evt) { if (evt instanceof BufferCallEvent) { BufferCallEvent bcEvt = (BufferCallBeforeInitHandler.BufferCallEvent) evt; switch (bcEvt.action) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java index 9e9c0688ecee..92f3bbea12c9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java @@ -228,6 +228,7 @@ public ByteBufferListOutputStream buildCellBlockStream(Codec codec, CompressionC } /** + * Create a new CellScanner * @param codec to use for cellblock * @param cellBlock to encode * @return CellScanner to work against the content of cellBlock @@ -248,6 +249,7 @@ public CellScanner createCellScanner(final Codec codec, final CompressionCodec c } /** + * Create a new CellScanner, reusing provided byte buffers * @param codec to use for cellblock * @param cellBlock ByteBuffer containing the cells written by the Codec. The buffer should be * position()'ed at the start of the cell block and limit()'ed at the end. 
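The two BlockingRpcConnection hunks above adopt the usual fix for a swallowed InterruptedException. A self-contained illustration of that idiom follows; it is generic Java, not HBase code, and the class and method names are invented.

public class InterruptRestoreExample {
  private final Object lock = new Object();

  /** Wait up to the given time for work, preserving the caller's interrupt status. */
  public void waitForWork(long millis) {
    synchronized (lock) {
      try {
        lock.wait(millis);
      } catch (InterruptedException e) {
        // Restore interrupt status so code further up the stack can still observe it.
        Thread.currentThread().interrupt();
      }
    }
  }
}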
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java index 6cb9cddd9feb..2b5c3765c170 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java @@ -57,6 +57,7 @@ public String toString() { } @Override + @SuppressWarnings("ReferenceEquality") public boolean equals(Object obj) { if (obj instanceof ConnectionId) { ConnectionId id = (ConnectionId) obj; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java index b33771e5b582..06342ecfe27f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRpcController.java @@ -50,19 +50,19 @@ public interface HBaseRpcController extends RpcController, CellScannable { void setCellScanner(CellScanner cellScanner); /** + * Set a priority for the request. * @param priority Priority for this request; should fall roughly in the range * {@link HConstants#NORMAL_QOS} to {@link HConstants#HIGH_QOS} */ void setPriority(int priority); /** + * Set a priority for the request based on a table name. * @param tn Set priority based off the table we are going against. */ void setPriority(final TableName tn); - /** - * @return The priority of this request - */ + /** Return the priority of this request */ int getPriority(); int getCallTimeout(); @@ -83,6 +83,8 @@ public interface HBaseRpcController extends RpcController, CellScannable { IOException getFailed(); /** + * Indicate the call has finished. + *

* IMPORTANT: always call this method if the call finished without any exception to tell * the {@code HBaseRpcController} that we are done. */ @@ -110,16 +112,12 @@ interface CancellationCallback { */ void notifyOnCancel(RpcCallback callback, CancellationCallback action) throws IOException; - /** - * @return True if this Controller is carrying the RPC target Region's RegionInfo. - */ + /** Return true if this Controller is carrying the RPC target Region's RegionInfo. */ default boolean hasRegionInfo() { return false; } - /** - * @return Target Region's RegionInfo or null if not available or pertinent. - */ + /** Return the target Region's RegionInfo or null if not available or pertinent. */ default RegionInfo getRegionInfo() { return null; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java index 57f8da98eff6..e97b9bc4d876 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java @@ -96,9 +96,7 @@ private static int write(final OutputStream dos, final Message header, final Mes return totalSize; } - /** - * @return Size on the wire when the two messages are written with writeDelimitedTo - */ + /** Return the size on the wire when the two messages are written with writeDelimitedTo */ public static int getTotalSizeWhenWrittenDelimited(Message... messages) { int totalSize = 0; for (Message m : messages) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClient.java index 7b698958ede2..f915b0cc7910 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcClient.java @@ -76,6 +76,7 @@ protected NettyRpcConnection createConnection(ConnectionId remoteId) throws IOEx } @Override + @SuppressWarnings("FutureReturnValueIgnored") protected void closeInternal() { if (shutdownGroupWhenClose) { group.shutdownGracefully(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java index 14e8cbc13d3b..9d811437ed20 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java @@ -126,6 +126,7 @@ public boolean isActive() { return channel != null; } + @SuppressWarnings("FutureReturnValueIgnored") private void shutdown0() { assert eventLoop.inEventLoop(); if (channel != null) { @@ -167,6 +168,7 @@ private void established(Channel ch) throws IOException { private boolean reloginInProgress; + @SuppressWarnings("FutureReturnValueIgnored") private void scheduleRelogin(Throwable error) { assert eventLoop.inEventLoop(); if (error instanceof FallbackDisallowedException) { @@ -199,6 +201,7 @@ private void failInit(Channel ch, IOException e) { shutdown0(); } + @SuppressWarnings("FutureReturnValueIgnored") private void saslNegotiate(final Channel ch) { assert eventLoop.inEventLoop(); UserGroupInformation ticket = provider.getRealUser(remoteId.getTicket()); @@ -220,6 +223,7 @@ private void saslNegotiate(final Channel ch) { saslPromise.addListener(new FutureListener() { @Override + @SuppressWarnings("FutureReturnValueIgnored") public void operationComplete(Future future) throws Exception { if 
(future.isSuccess()) { ChannelPipeline p = ch.pipeline(); @@ -281,6 +285,7 @@ private void connect() throws UnknownHostException { .remoteAddress(remoteAddr).connect().addListener(new ChannelFutureListener() { @Override + @SuppressWarnings("FutureReturnValueIgnored") public void operationComplete(ChannelFuture future) throws Exception { Channel ch = future.channel(); if (!future.isSuccess()) { @@ -317,6 +322,7 @@ public void run(Object parameter) { }, new CancellationCallback() { @Override + @SuppressWarnings("FutureReturnValueIgnored") public void run(boolean cancelled) throws IOException { if (cancelled) { setCancelled(call); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java index fe32189f81bd..08f972e184f2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java @@ -31,7 +31,6 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.protobuf.Message; -import org.apache.hbase.thirdparty.com.google.protobuf.Message.Builder; import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufInputStream; @@ -76,6 +75,7 @@ public NettyRpcDuplexHandler(NettyRpcConnection conn, CellBlockBuilder cellBlock } + @SuppressWarnings("FutureReturnValueIgnored") private void writeRequest(ChannelHandlerContext ctx, Call call, ChannelPromise promise) throws IOException { id2Call.put(call.id, call); @@ -114,6 +114,7 @@ private void writeRequest(ChannelHandlerContext ctx, Call call, ChannelPromise p } @Override + @SuppressWarnings("FutureReturnValueIgnored") public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception { if (msg instanceof Call) { @@ -168,7 +169,7 @@ private void readResponse(ChannelHandlerContext ctx, ByteBuf buf) throws IOExcep } Message value; if (call.responseDefaultType != null) { - Builder builder = call.responseDefaultType.newBuilderForType(); + Message.Builder builder = call.responseDefaultType.newBuilderForType(); builder.mergeDelimitedFrom(in); value = builder.build(); } else { @@ -202,7 +203,7 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception } } - private void cleanupCalls(ChannelHandlerContext ctx, IOException error) { + private void cleanupCalls(IOException error) { for (Call call : id2Call.values()) { call.setException(error); } @@ -212,7 +213,7 @@ private void cleanupCalls(ChannelHandlerContext ctx, IOException error) { @Override public void channelInactive(ChannelHandlerContext ctx) throws Exception { if (!id2Call.isEmpty()) { - cleanupCalls(ctx, new ConnectionClosedException("Connection closed")); + cleanupCalls(new ConnectionClosedException("Connection closed")); } conn.shutdown(); ctx.fireChannelInactive(); @@ -221,7 +222,7 @@ public void channelInactive(ChannelHandlerContext ctx) throws Exception { @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { if (!id2Call.isEmpty()) { - cleanupCalls(ctx, IPCUtil.toIOE(cause)); + cleanupCalls(IPCUtil.toIOE(cause)); } conn.shutdown(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java index 
62d0bb1d4550..ce60279ca8db 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java @@ -123,30 +123,22 @@ private IOException instantiateException(Class cls) throw return ex; } - /** - * @return null if not set - */ + /** Return null if not set */ public String getHostname() { return this.hostname; } - /** - * @return -1 if not set - */ + /** Return -1 if not set */ public int getPort() { return this.port; } - /** - * @return True if origin exception was a do not retry type. - */ + /** Return true if origin exception was a do not retry type. */ public boolean isDoNotRetry() { return this.doNotRetry; } - /** - * @return True if the server was considered overloaded when the exception was thrown. - */ + /** Return true if the server was considered overloaded when the exception was thrown. */ public boolean isServerOverloaded() { return serverOverloaded; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java index 6ecff49e52b1..7db9cd8b585f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java @@ -90,8 +90,8 @@ public interface RpcClient extends Closeable { void close(); /** - * @return true when this client uses a {@link org.apache.hadoop.hbase.codec.Codec} and so - * supports cell blocks. + * Returns true when this client uses a {@link org.apache.hadoop.hbase.codec.Codec} and so + * supports cell blocks. */ boolean hasCellBlockSupport(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerTooBusyException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerTooBusyException.java index 6c22ca94e428..83fc3927eb1a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerTooBusyException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerTooBusyException.java @@ -34,6 +34,7 @@ public ServerTooBusyException(Address address, long count) { } @Deprecated + @SuppressWarnings("InlineMeSuggester") public ServerTooBusyException(InetSocketAddress address, long count) { super("Busy Server! 
" + count + " concurrent RPCs against " + address); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java index 2b0f2f4509e4..065b85279b20 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.master; -import java.util.Date; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -386,7 +385,7 @@ public String toString() { */ public String toDescriptiveString() { long relTime = EnvironmentEdgeManager.currentTime() - stamp; - return hri.getRegionNameAsString() + " state=" + state + ", ts=" + new Date(stamp) + " (" + return hri.getRegionNameAsString() + " state=" + state + ", ts=" + stamp + " (" + (relTime / 1000) + "s ago)" + ", server=" + serverName; } @@ -417,12 +416,13 @@ public static RegionState convert(ClusterStatusProtos.RegionState proto) { */ @Override public boolean equals(Object obj) { - if (this == obj) return true; - if (obj == null || getClass() != obj.getClass()) { + if (this == obj) { + return true; + } + if (!(obj instanceof RegionState)) { return false; } RegionState tmp = (RegionState) obj; - return RegionInfo.COMPARATOR.compare(tmp.hri, hri) == 0 && tmp.state == state && ((serverName != null && serverName.equals(tmp.serverName)) || (tmp.serverName == null && serverName == null)); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java index 7c6f780e069d..2f56a0c87446 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java @@ -35,6 +35,7 @@ private ProtobufMagic() { public static final byte[] PB_MAGIC = new byte[] { 'P', 'B', 'U', 'F' }; /** + * Check if the byte array has {@link #PB_MAGIC} for a prefix. * @param bytes Bytes to check. * @return True if passed bytes has {@link #PB_MAGIC} for a prefix. */ @@ -43,10 +44,8 @@ public static boolean isPBMagicPrefix(final byte[] bytes) { return isPBMagicPrefix(bytes, 0, bytes.length); } - /* - * Copied from Bytes.java to here hbase-common now depends on hbase-protocol Referencing - * Bytes.java directly would create circular dependency - */ + // Copied from Bytes.java to here hbase-common now depends on hbase-protocol Referencing + // Bytes.java directly would create circular dependency private static int compareTo(byte[] buffer1, int offset1, int length1, byte[] buffer2, int offset2, int length2) { // Short circuit equal case @@ -67,6 +66,7 @@ private static int compareTo(byte[] buffer1, int offset1, int length1, byte[] bu } /** + * Check if the byte array has {@link #PB_MAGIC} for a prefix. * @param bytes Bytes to check. 
* @param offset offset to start at * @param len length to use @@ -77,9 +77,7 @@ public static boolean isPBMagicPrefix(final byte[] bytes, int offset, int len) { return compareTo(PB_MAGIC, 0, PB_MAGIC.length, bytes, offset, PB_MAGIC.length) == 0; } - /** - * @return Length of {@link #PB_MAGIC} - */ + /** Return length of {@link #PB_MAGIC} */ public static int lengthOfPBMagic() { return PB_MAGIC.length; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java index 9ddd408f845c..930393c1e546 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaFilter.java @@ -92,32 +92,32 @@ public QuotaFilter addTypeFilter(final QuotaType type) { return this; } - /** @return true if the filter is empty */ + /** Return true if the filter is empty */ public boolean isNull() { return !hasFilters; } - /** @return the QuotaType types that we want to filter one */ + /** Return the QuotaType types that we want to filter one */ public Set getTypeFilters() { return types; } - /** @return the Namespace filter regex */ + /** Return the Namespace filter regex */ public String getNamespaceFilter() { return namespaceRegex; } - /** @return the Table filter regex */ + /** Return the Table filter regex */ public String getTableFilter() { return tableRegex; } - /** @return the User filter regex */ + /** Return the User filter regex */ public String getUserFilter() { return userRegex; } - /** @return the RegionServer filter regex */ + /** Return the RegionServer filter regex */ public String getRegionServerFilter() { return regionServerRegex; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java index 728959e0a0ca..1dd5bf275bba 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java @@ -19,8 +19,8 @@ import java.io.Closeable; import java.io.IOException; +import java.util.ArrayDeque; import java.util.Iterator; -import java.util.LinkedList; import java.util.Objects; import java.util.Queue; import org.apache.hadoop.conf.Configuration; @@ -43,7 +43,7 @@ public class QuotaRetriever implements Closeable, Iterable { private static final Logger LOG = LoggerFactory.getLogger(QuotaRetriever.class); - private final Queue cache = new LinkedList<>(); + private final Queue cache = new ArrayDeque<>(); private ResultScanner scanner; /** * Connection to use. 
Could pass one in and have this class use it but this class wants to be diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java index 878cbe871e57..34cf40046732 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java @@ -23,10 +23,8 @@ import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest.Builder; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota; /** * A {@link QuotaSettings} implementation for configuring filesystem-use quotas. @@ -35,7 +33,7 @@ @InterfaceStability.Evolving class SpaceLimitSettings extends QuotaSettings { - private final SpaceLimitRequest proto; + private final QuotaProtos.SpaceLimitRequest proto; SpaceLimitSettings(TableName tableName, long sizeLimit, SpaceViolationPolicy violationPolicy) { super(null, Objects.requireNonNull(tableName), null, null); @@ -65,45 +63,46 @@ class SpaceLimitSettings extends QuotaSettings { proto = buildProtoRemoveQuota(); } - SpaceLimitSettings(TableName tableName, String namespace, SpaceLimitRequest req) { + SpaceLimitSettings(TableName tableName, String namespace, QuotaProtos.SpaceLimitRequest req) { super(null, tableName, namespace, null); proto = req; } /** - * Build a {@link SpaceLimitRequest} protobuf object from the given {@link SpaceQuota}. + * Build a {@link QuotaProtos.SpaceLimitRequest} protobuf object from the given + * {@link SpaceQuota}. * @param protoQuota The preconstructed SpaceQuota protobuf * @return A protobuf request to change a space limit quota */ - private SpaceLimitRequest buildProtoFromQuota(SpaceQuota protoQuota) { - return SpaceLimitRequest.newBuilder().setQuota(protoQuota).build(); + private QuotaProtos.SpaceLimitRequest buildProtoFromQuota(QuotaProtos.SpaceQuota protoQuota) { + return QuotaProtos.SpaceLimitRequest.newBuilder().setQuota(protoQuota).build(); } /** - * Builds a {@link SpaceQuota} protobuf object given the arguments. + * Builds a {@link QuotaProtos.SpaceQuota} protobuf object given the arguments. * @param sizeLimit The size limit of the quota. * @param violationPolicy The action to take when the quota is exceeded. * @return The protobuf SpaceQuota representation. */ - private SpaceLimitRequest buildProtoAddQuota(long sizeLimit, + private QuotaProtos.SpaceLimitRequest buildProtoAddQuota(long sizeLimit, SpaceViolationPolicy violationPolicy) { - return buildProtoFromQuota(SpaceQuota.newBuilder().setSoftLimit(sizeLimit) + return buildProtoFromQuota(QuotaProtos.SpaceQuota.newBuilder().setSoftLimit(sizeLimit) .setViolationPolicy(ProtobufUtil.toProtoViolationPolicy(violationPolicy)).build()); } /** - * Builds a {@link SpaceQuota} protobuf object to remove a quota. + * Builds a {@link QuotaProtos.SpaceQuota} protobuf object to remove a quota. * @return The protobuf SpaceQuota representation. 
*/ - private SpaceLimitRequest buildProtoRemoveQuota() { - return SpaceLimitRequest.newBuilder().setQuota(SpaceQuota.newBuilder().setRemove(true).build()) - .build(); + private QuotaProtos.SpaceLimitRequest buildProtoRemoveQuota() { + return QuotaProtos.SpaceLimitRequest.newBuilder() + .setQuota(QuotaProtos.SpaceQuota.newBuilder().setRemove(true).build()).build(); } /** * Returns a copy of the internal state of this */ - SpaceLimitRequest getProto() { + QuotaProtos.SpaceLimitRequest getProto() { return proto.toBuilder().build(); } @@ -113,7 +112,7 @@ public QuotaType getQuotaType() { } @Override - protected void setupSetQuotaRequest(Builder builder) { + protected void setupSetQuotaRequest(MasterProtos.SetQuotaRequest.Builder builder) { // TableName/Namespace are serialized in QuotaSettings builder.setSpaceLimit(proto); } @@ -203,7 +202,7 @@ protected QuotaSettings merge(QuotaSettings newSettings) { // The message contained the expect SpaceQuota object if (settingsToMerge.proto.hasQuota()) { - SpaceQuota quotaToMerge = settingsToMerge.proto.getQuota(); + QuotaProtos.SpaceQuota quotaToMerge = settingsToMerge.proto.getQuota(); if (quotaToMerge.getRemove()) { return settingsToMerge; } else { @@ -216,7 +215,7 @@ protected QuotaSettings merge(QuotaSettings newSettings) { throw new IllegalArgumentException("Cannot merge " + newSettings + " into " + this); } // Create a builder from the old settings - SpaceQuota.Builder mergedBuilder = this.proto.getQuota().toBuilder(); + QuotaProtos.SpaceQuota.Builder mergedBuilder = this.proto.getQuota().toBuilder(); // Build a new SpaceQuotas object from merging in the new settings return new SpaceLimitSettings(getTableName(), getNamespace(), buildProtoFromQuota(mergedBuilder.mergeFrom(quotaToMerge).build())); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotView.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotView.java index 4bc47b9cddd3..9db7282e59b0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotView.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceQuotaSnapshotView.java @@ -40,7 +40,7 @@ interface SpaceQuotaStatusView { Optional getPolicy(); /** - * @return {@code true} if the quota is being violated, {@code false} otherwise. + * Return {@code true} if the quota is being violated, {@code false} otherwise. 
*/ boolean isInViolation(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseRpcConnectionHeaderHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseRpcConnectionHeaderHandler.java index a75091c5293d..b21d987c8ebd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseRpcConnectionHeaderHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseRpcConnectionHeaderHandler.java @@ -51,6 +51,7 @@ public NettyHBaseRpcConnectionHeaderHandler(Promise saslPromise, Config } @Override + @SuppressWarnings("FutureReturnValueIgnored") protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Exception { // read the ConnectionHeaderResponse from server int len = msg.readInt(); @@ -72,6 +73,7 @@ protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Excep } @Override + @SuppressWarnings("FutureReturnValueIgnored") public void handlerAdded(ChannelHandlerContext ctx) { try { // send the connection header to server first diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java index 7473c3269b04..172dc6619f65 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java @@ -75,12 +75,14 @@ public NettyHBaseSaslRpcClientHandler(Promise saslPromise, UserGroupInf SaslUtil.QualityOfProtection.AUTHENTICATION.name().toLowerCase())); } + @SuppressWarnings("FutureReturnValueIgnored") private void writeResponse(ChannelHandlerContext ctx, byte[] response) { LOG.trace("Sending token size={} from initSASLContext.", response.length); ctx.writeAndFlush( ctx.alloc().buffer(4 + response.length).writeInt(response.length).writeBytes(response)); } + @SuppressWarnings("FutureReturnValueIgnored") private void tryComplete(ChannelHandlerContext ctx) { if (!saslRpcClient.isComplete()) { return; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java index c2dc1042c913..f16c1e2d0069 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java @@ -93,9 +93,10 @@ public static QualityOfProtection getQop(String stringQop) { } /** + * Return a map with values for SASL properties. * @param rpcProtection Value of 'hbase.rpc.protection' configuration. - * @return Map with values for SASL properties. 
*/ + @SuppressWarnings("StringSplitter") public static Map initSaslProperties(String rpcProtection) { String saslQop; if (rpcProtection.isEmpty()) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java index b6986b564ac8..a1c673180c75 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java @@ -29,16 +29,11 @@ import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.security.SecurityCapability; -import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.AccessControlService.BlockingInterface; - /** * Utility client for doing access control admin operations. */ @@ -67,17 +62,11 @@ public static boolean isCellAuthorizationEnabled(Connection connection) throws I .contains(SecurityCapability.CELL_AUTHORIZATION); } - private static BlockingInterface getAccessControlServiceStub(Table ht) throws IOException { - CoprocessorRpcChannel service = ht.coprocessorService(HConstants.EMPTY_START_ROW); - BlockingInterface protocol = AccessControlProtos.AccessControlService.newBlockingStub(service); - return protocol; - } - /** - * Grants permission on the specified table for the specified user - * @param connection The Connection instance to use nnnn * @param mergeExistingPermissions If set - * to false, later granted permissions will override previous granted - * permissions. otherwise, it'll merge with previous granted permissions. nn + * Grants permission on the specified table for the specified user. + *
<p>
+ * If mergeExistingPermissions is set to false, later granted permissions will override previous + * granted permissions. otherwise, it'll merge with previous granted permissions. */ private static void grant(Connection connection, final TableName tableName, final String userName, final byte[] family, final byte[] qual, boolean mergeExistingPermissions, @@ -90,7 +79,6 @@ private static void grant(Connection connection, final TableName tableName, fina /** * Grants permission on the specified table for the specified user. If permissions for a specified * user exists, later granted permissions will override previous granted permissions. - * @param connection The Connection instance to use nnnnnn */ public static void grant(Connection connection, final TableName tableName, final String userName, final byte[] family, final byte[] qual, final Permission.Action... actions) throws Throwable { @@ -98,9 +86,10 @@ public static void grant(Connection connection, final TableName tableName, final } /** - * Grants permission on the specified namespace for the specified user. nnn * @param - * mergeExistingPermissions If set to false, later granted permissions will override previous - * granted permissions. otherwise, it'll merge with previous granted permissions. nn + * Grants permission on the specified namespace for the specified user. + *
<p>
+ * If mergeExistingPermissions is set to false, later granted permissions will override previous + * granted permissions. otherwise, it'll merge with previous granted permissions. */ private static void grant(Connection connection, final String namespace, final String userName, boolean mergeExistingPermissions, final Permission.Action... actions) throws Throwable { @@ -113,7 +102,6 @@ private static void grant(Connection connection, final String namespace, final S * Grants permission on the specified namespace for the specified user. If permissions on the * specified namespace exists, later granted permissions will override previous granted * permissions. - * @param connection The Connection instance to use nnnn */ public static void grant(Connection connection, final String namespace, final String userName, final Permission.Action... actions) throws Throwable { @@ -121,9 +109,10 @@ public static void grant(Connection connection, final String namespace, final St } /** - * Grant global permissions for the specified user. nn * @param mergeExistingPermissions If set to - * false, later granted permissions will override previous granted permissions. otherwise, it'll - * merge with previous granted permissions. nn + * Grant global permissions for the specified user. + *
<p>
+ * If mergeExistingPermissions is set to false, later granted permissions will override previous + * granted permissions. otherwise, it'll merge with previous granted permissions. */ private static void grant(Connection connection, final String userName, boolean mergeExistingPermissions, final Permission.Action... actions) throws Throwable { @@ -134,7 +123,7 @@ private static void grant(Connection connection, final String userName, /** * Grant global permissions for the specified user. If permissions for the specified user exists, - * later granted permissions will override previous granted permissions. nnnn + * later granted permissions will override previous granted permissions. */ public static void grant(Connection connection, final String userName, final Permission.Action... actions) throws Throwable { @@ -148,10 +137,7 @@ public static boolean isAccessControllerRunning(Connection connection) } } - /** - * Revokes the permission on the table - * @param connection The Connection instance to use nnnnnn - */ + /** Revokes the permission on the table. */ public static void revoke(Connection connection, final TableName tableName, final String username, final byte[] family, final byte[] qualifier, final Permission.Action... actions) throws Throwable { @@ -159,20 +145,14 @@ public static void revoke(Connection connection, final TableName tableName, fina .withFamily(family).withQualifier(qualifier).withActions(actions).build())); } - /** - * Revokes the permission on the namespace for the specified user. - * @param connection The Connection instance to use nnnn - */ + /** Revokes the permission on the namespace for the specified user. */ public static void revoke(Connection connection, final String namespace, final String userName, final Permission.Action... actions) throws Throwable { connection.getAdmin().revoke( new UserPermission(userName, Permission.newBuilder(namespace).withActions(actions).build())); } - /** - * Revoke global permissions for the specified user. - * @param connection The Connection instance to use - */ + /** Revoke global permissions for the specified user. */ public static void revoke(Connection connection, final String userName, final Permission.Action... 
actions) throws Throwable { connection.getAdmin() diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java index 066e6f4e04da..3f5d121179f3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java @@ -42,9 +42,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.AccessControlService; import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsResponse; -/** - * @since 2.0.0 - */ @InterfaceAudience.Private public class AccessControlUtil { private AccessControlUtil() { @@ -226,13 +223,12 @@ public static AccessControlProtos.UsersAndPermissions toUsersAndPermissions(Stri */ public static TablePermission toTablePermission(AccessControlProtos.TablePermission proto) { Permission.Action[] actions = toPermissionActions(proto.getActionList()); - TableName table = null; byte[] qualifier = null; byte[] family = null; if (!proto.hasTableName()) { throw new IllegalStateException("TableName cannot be empty"); } - table = ProtobufUtil.toTableName(proto.getTableName()); + TableName table = ProtobufUtil.toTableName(proto.getTableName()); if (proto.hasFamily()) { family = proto.getFamily().toByteArray(); } @@ -267,11 +263,10 @@ public static Permission toPermission(AccessControlProtos.Permission proto) { Permission.Action[] actions = toPermissionActions(perm.getActionList()); byte[] qualifier = null; byte[] family = null; - TableName table = null; if (!perm.hasTableName()) { throw new IllegalStateException("TableName cannot be empty"); } - table = ProtobufUtil.toTableName(perm.getTableName()); + TableName table = ProtobufUtil.toTableName(perm.getTableName()); if (perm.hasFamily()) { family = perm.getFamily().toByteArray(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java index b6df2c94a044..604e705315f7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.HasUserPermissionsRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Type; import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; @@ -292,14 +291,14 @@ public static RevokeRequest buildRevokeRequest(UserPermission userPermission) { } if (request.getNamespace() != null && !request.getNamespace().isEmpty()) { builder.setNamespaceName(ByteString.copyFromUtf8(request.getNamespace())); - builder.setType(Type.Namespace); + builder.setType(AccessControlProtos.Permission.Type.Namespace); } if (request.getTableName() != null) { builder.setTableName(toProtoTableName(request.getTableName())); - builder.setType(Type.Table); + 
builder.setType(AccessControlProtos.Permission.Type.Table); } if (!builder.hasType()) { - builder.setType(Type.Global); + builder.setType(AccessControlProtos.Permission.Type.Global); } if (request.getFamily() != null && request.getFamily().length > 0) { builder.setColumnFamily(ByteString.copyFrom(request.getFamily())); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java index e0a12c7d431f..65ebbef7da09 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java @@ -141,11 +141,11 @@ public boolean implies(TableName table, KeyValue kv, Action action) { return false; } - if (family != null && !(CellUtil.matchingFamily(kv, family))) { + if (family != null && !CellUtil.matchingFamily(kv, family)) { return false; } - if (qualifier != null && !(CellUtil.matchingQualifier(kv, qualifier))) { + if (qualifier != null && !CellUtil.matchingQualifier(kv, qualifier)) { return false; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AbstractSaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AbstractSaslClientAuthenticationProvider.java index e9990066050e..9fed3a9c624d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AbstractSaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AbstractSaslClientAuthenticationProvider.java @@ -48,11 +48,14 @@ public final int hashCode() { } @Override + @SuppressWarnings("EqualsUsingHashCode") public final boolean equals(Object o) { - // SaslClientAuthProviders should be unique via their hashCode(). - if (o instanceof AbstractSaslClientAuthenticationProvider) { - return this.hashCode() == o.hashCode(); + if (this == o) { + return true; } - return false; + if (!(o instanceof AbstractSaslClientAuthenticationProvider)) { + return false; + } + return this.hashCode() == ((AbstractSaslClientAuthenticationProvider) o).hashCode(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java index 40ff0373c36c..71df9cd9bc41 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java @@ -66,6 +66,7 @@ private static void injectFault() throws ServiceException { * @return the authentication token instance, wrapped by a {@link CompletableFuture}. 
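Stepping back to the AccessControlClient hunks above: the public grant/revoke entry points keep their signatures, only the javadoc changes. A minimal usage sketch, where the table, user and column family names are made-up placeholders:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.util.Bytes;

public class AclGrantRevokeExample {
  static void grantAndRevoke(Connection connection) throws Throwable {
    TableName table = TableName.valueOf("ns:orders");
    // Grant READ/WRITE on one column family (null qualifier = whole family) ...
    AccessControlClient.grant(connection, table, "alice", Bytes.toBytes("cf"), null,
      Permission.Action.READ, Permission.Action.WRITE);
    // ... and revoke it again.
    AccessControlClient.revoke(connection, table, "alice", Bytes.toBytes("cf"), null,
      Permission.Action.READ, Permission.Action.WRITE);
  }
}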
*/ @InterfaceAudience.Private + @SuppressWarnings("FutureReturnValueIgnored") public static CompletableFuture> obtainToken(AsyncConnection conn) { CompletableFuture> future = new CompletableFuture<>(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java index 8abaee005094..4343df594cf9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java @@ -36,7 +36,7 @@ public CellVisibility(String expression) { } /** - * @return The visibility expression + * Return The visibility expression */ public String getExpression() { return this.expression; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java index 42508782d1a7..5f2fe2ccf74a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java @@ -88,7 +88,7 @@ public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOE for (String label : labels) { if (label.length() > 0) { VisibilityLabel.Builder newBuilder = VisibilityLabel.newBuilder(); - newBuilder.setLabel(UnsafeByteOperations.unsafeWrap((Bytes.toBytes(label)))); + newBuilder.setLabel(UnsafeByteOperations.unsafeWrap(Bytes.toBytes(label))); builder.addVisLabel(newBuilder.build()); } } @@ -108,18 +108,13 @@ public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOE } } - /** - * Sets given labels globally authorized for the user. nnnnn - */ + /** Sets given labels globally authorized for the user. */ public static VisibilityLabelsResponse setAuths(Connection connection, final String[] auths, final String user) throws Throwable { return setOrClearAuths(connection, auths, user, true); } - /** - * @param connection the Connection instance to use. n * @return labels, the given user is - * globally authorized for. n - */ + /** Get the authentication details for a given user. 
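The reworded getAuths javadoc above concerns visibility-label authorizations; a minimal sketch of defining labels and assigning them to a user (label and user names are placeholders, and addLabels normally requires superuser rights):

import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.visibility.VisibilityClient;

public class VisibilityAuthsExample {
  static void labelUser(Connection connection) throws Throwable {
    // Define the labels once, then authorize a user for them.
    VisibilityClient.addLabels(connection, new String[] { "secret", "confidential" });
    VisibilityClient.setAuths(connection, new String[] { "secret", "confidential" }, "alice");
    // Read back what the user is globally authorized for.
    System.out.println(VisibilityClient.getAuths(connection, "alice").getAuthList());
  }
}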
*/ public static GetAuthsResponse getAuths(Connection connection, final String user) throws Throwable { try (Table table = connection.getTable(LABELS_TABLE_NAME)) { @@ -212,7 +207,7 @@ public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOE setAuthReqBuilder.setUser(UnsafeByteOperations.unsafeWrap(Bytes.toBytes(user))); for (String auth : auths) { if (auth.length() > 0) { - setAuthReqBuilder.addAuth((ByteString.copyFromUtf8(auth))); + setAuthReqBuilder.addAuth(ByteString.copyFromUtf8(auth)); } } if (setOrClear) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index 40b32d53c39e..c54676c0656c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.CacheEvictionStats; import org.apache.hadoop.hbase.CacheEvictionStatsBuilder; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.Cell.Type; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; @@ -310,6 +309,7 @@ public static byte[] prependPBMagic(final byte[] bytes) { } /** + * Check if the passed bytes has {@link ProtobufMagic#PB_MAGIC} for a prefix. * @param bytes Bytes to check. * @return True if passed bytes has {@link ProtobufMagic#PB_MAGIC} for a prefix. */ @@ -318,6 +318,7 @@ public static boolean isPBMagicPrefix(final byte[] bytes) { } /** + * Check if the passed bytes has {@link ProtobufMagic#PB_MAGIC} for a prefix. * @param bytes Bytes to check. * @param offset offset to start at * @param len length to use @@ -328,6 +329,7 @@ public static boolean isPBMagicPrefix(final byte[] bytes, int offset, int len) { } /** + * Assert the passed bytes has {@link ProtobufMagic#PB_MAGIC} for a prefix. * @param bytes bytes to check * @throws DeserializationException if we are missing the pb magic prefix */ @@ -339,9 +341,7 @@ public static void expectPBMagicPrefix(final byte[] bytes) throws Deserializatio } } - /** - * @return Length of {@link ProtobufMagic#lengthOfPBMagic()} - */ + /** Return the length of {@link ProtobufMagic#lengthOfPBMagic()} */ public static int lengthOfPBMagic() { return ProtobufMagic.lengthOfPBMagic(); } @@ -734,7 +734,7 @@ public static Put toPut(final MutationProto proto, final CellScanner cellScanner } else { put.add(cellBuilder.clear().setRow(put.getRow()).setFamily(family) .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) - .setTimestamp(ts).setType(Type.Put) + .setTimestamp(ts).setType(Cell.Type.Put) .setValue(qv.hasValue() ? 
qv.getValue().toByteArray() : null).build()); } } @@ -891,9 +891,9 @@ private static long cellTimestampOrLatest(QualifierValue cell) { } /** - * Convert a protocol buffer Mutate to an Append n * @param proto the protocol buffer Mutate to - * convert - * @return the converted client Append n + * Convert a protocol buffer Mutate to an Append + * @param proto the protocol buffer Mutate to convert + * @return the converted client Append */ public static Append toAppend(final MutationProto proto, final CellScanner cellScanner) throws IOException { @@ -911,7 +911,7 @@ public static Append toAppend(final MutationProto proto, final CellScanner cellS /** * Convert a protocol buffer Mutate to an Increment * @param proto the protocol buffer Mutate to convert - * @return the converted client Increment n + * @return the converted client Increment */ public static Increment toIncrement(final MutationProto proto, final CellScanner cellScanner) throws IOException { @@ -930,7 +930,7 @@ public static Increment toIncrement(final MutationProto proto, final CellScanner /** * Convert a MutateRequest to Mutation * @param proto the protocol buffer Mutate to convert - * @return the converted Mutation n + * @return the converted Mutation */ public static Mutation toMutation(final MutationProto proto) throws IOException { MutationType type = proto.getMutateType(); @@ -978,7 +978,7 @@ public static Scan.ReadType toReadType(ClientProtos.Scan.ReadType readType) { /** * Convert a client Scan to a protocol buffer Scan * @param scan the client Scan to convert - * @return the converted protocol buffer Scan n + * @return the converted protocol buffer Scan */ public static ClientProtos.Scan toScan(final Scan scan) throws IOException { ClientProtos.Scan.Builder scanBuilder = ClientProtos.Scan.newBuilder(); @@ -1072,7 +1072,7 @@ public static ClientProtos.Scan toScan(final Scan scan) throws IOException { /** * Convert a protocol buffer Scan to a client Scan * @param proto the protocol buffer Scan to convert - * @return the converted client Scan n + * @return the converted client Scan */ public static Scan toScan(final ClientProtos.Scan proto) throws IOException { byte[] startRow = HConstants.EMPTY_START_ROW; @@ -1192,7 +1192,7 @@ public static Cursor toCursor(ClientProtos.Cursor cursor) { /** * Create a protocol buffer Get based on a client Get. * @param get the client Get - * @return a protocol buffer Get n + * @return a protocol buffer Get */ public static ClientProtos.Get toGet(final Get get) throws IOException { ClientProtos.Get.Builder builder = ClientProtos.Get.newBuilder(); @@ -1258,7 +1258,8 @@ public static MutationProto toMutation(final MutationType type, final Mutation m } /** - * Create a protocol buffer Mutate based on a client Mutation nn * @return a protobuf'd Mutation n + * Create a protocol buffer Mutate based on a client Mutation + * @return a protobuf'd Mutation */ public static MutationProto toMutation(final MutationType type, final Mutation mutation, final long nonce) throws IOException { @@ -1307,8 +1308,8 @@ public static MutationProto toMutation(final MutationType type, final Mutation m /** * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data. - * Understanding is that the Cell will be transported other than via protobuf. nnn * @return a - * protobuf'd Mutation n + * Understanding is that the Cell will be transported other than via protobuf. 
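The Scan/Get conversion helpers touched above come in symmetric client/protobuf pairs, which is what makes the javadoc cleanup easy to sanity-check by round-tripping. A small sketch with placeholder row and family names:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanRoundTrip {
  public static void main(String[] args) throws Exception {
    Scan scan = new Scan().withStartRow(Bytes.toBytes("row-000"))
      .withStopRow(Bytes.toBytes("row-999")).addFamily(Bytes.toBytes("cf"));
    // Client Scan -> protobuf Scan -> client Scan again.
    ClientProtos.Scan pb = ProtobufUtil.toScan(scan);
    Scan back = ProtobufUtil.toScan(pb);
    System.out.println(Bytes.toString(back.getStartRow()) + " .. "
      + Bytes.toString(back.getStopRow()));
  }
}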
+ * @return a protobuf'd Mutation */ public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation, final MutationProto.Builder builder) throws IOException { @@ -1317,8 +1318,8 @@ public static MutationProto toMutationNoData(final MutationType type, final Muta /** * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data. - * Understanding is that the Cell will be transported other than via protobuf. nn * @return a - * protobuf'd Mutation n + * Understanding is that the Cell will be transported other than via protobuf. + * @return a protobuf'd Mutation */ public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation) throws IOException { @@ -1344,8 +1345,8 @@ public static MutationProto toMutationNoData(final MutationType type, final Muta /** * Code shared by {@link #toMutation(MutationType, Mutation)} and - * {@link #toMutationNoData(MutationType, Mutation)} nn * @return A partly-filled out protobuf'd - * Mutation. + * {@link #toMutationNoData(MutationType, Mutation)} + * @return A partly-filled out protobuf'd Mutation. */ private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final MutationType type, final Mutation mutation, MutationProto.Builder builder) { @@ -1478,7 +1479,7 @@ public static Result toResult(final ClientProtos.Result proto, boolean decodeTag * Convert a protocol buffer Result to a client Result * @param proto the protocol buffer Result to convert * @param scanner Optional cell scanner. - * @return the converted client Result n + * @return the converted client Result */ public static Result toResult(final ClientProtos.Result proto, final CellScanner scanner) throws IOException { @@ -1593,8 +1594,8 @@ public static FilterProtos.Filter toFilter(Filter filter) throws IOException { } /** - * Convert a delete KeyValue type to protocol buffer DeleteType. n * @return protocol buffer - * DeleteType n + * Convert a delete KeyValue type to protocol buffer DeleteType. + * @return protocol buffer DeleteType */ public static DeleteType toDeleteType(KeyValue.Type type) throws IOException { switch (type) { @@ -1614,7 +1615,7 @@ public static DeleteType toDeleteType(KeyValue.Type type) throws IOException { /** * Convert a protocol buffer DeleteType to delete KeyValue type. * @param type The DeleteType - * @return The type. n + * @return The type */ public static KeyValue.Type fromDeleteType(DeleteType type) throws IOException { switch (type) { @@ -1680,7 +1681,6 @@ public static org.apache.hadoop.hbase.client.RegionInfo getRegionInfo( final RpcController controller, final AdminService.BlockingInterface admin, final byte[] regionName) throws IOException { try { - GetRegionInfoRequest request = getGetRegionInfoRequest(regionName); GetRegionInfoResponse response = admin.getRegionInfo(controller, getGetRegionInfoRequest(regionName)); return toRegionInfo(response.getRegionInfo()); @@ -1689,9 +1689,7 @@ public static org.apache.hadoop.hbase.client.RegionInfo getRegionInfo( } } - /** - * @return A GetRegionInfoRequest for the passed in regionName. - */ + /** Return a GetRegionInfoRequest for the passed in regionName. 
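The Type -> Cell.Type changes earlier in this file (and again in the test classes further down) all funnel through the cell builder API; a minimal sketch of building a Put-type cell with placeholder values:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.util.Bytes;

public class CellBuilderExample {
  public static void main(String[] args) {
    // Referencing the nested enum as Cell.Type (instead of importing Cell.Type directly)
    // is the style this patch standardizes on.
    Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
      .setRow(Bytes.toBytes("row1")).setFamily(Bytes.toBytes("cf"))
      .setQualifier(Bytes.toBytes("q")).setTimestamp(System.currentTimeMillis())
      .setType(Cell.Type.Put).setValue(Bytes.toBytes("v")).build();
    System.out.println(cell);
  }
}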
*/ public static GetRegionInfoRequest getGetRegionInfoRequest(final byte[] regionName) throws IOException { return org.apache.hadoop.hbase.client.RegionInfo.isEncodedRegionName(regionName) @@ -1702,9 +1700,7 @@ public static GetRegionInfoRequest getGetRegionInfoRequest(final byte[] regionNa : RequestConverter.buildGetRegionInfoRequest(regionName); } - /** - * A helper to close a region given a region name using admin protocol. nnn - */ + /** A helper to close a region given a region name using admin protocol. */ public static void closeRegion(final RpcController controller, final AdminService.BlockingInterface admin, final ServerName server, final byte[] regionName) throws IOException { @@ -1717,9 +1713,7 @@ public static void closeRegion(final RpcController controller, } } - /** - * A helper to warmup a region given a region name using admin protocol nn * - */ + /** A helper to warmup a region given a region name using admin protocol */ public static void warmupRegion(final RpcController controller, final AdminService.BlockingInterface admin, final org.apache.hadoop.hbase.client.RegionInfo regionInfo) throws IOException { @@ -1734,9 +1728,7 @@ public static void warmupRegion(final RpcController controller, } } - /** - * A helper to open a region using admin protocol. nnn - */ + /** A helper to open a region using admin protocol. */ public static void openRegion(final RpcController controller, final AdminService.BlockingInterface admin, ServerName server, final org.apache.hadoop.hbase.client.RegionInfo region) throws IOException { @@ -1748,19 +1740,13 @@ public static void openRegion(final RpcController controller, } } - /** - * A helper to get the all the online regions on a region server using admin protocol. n * @return - * a list of online region info n - */ + /** A helper to get the all the online regions on a region server using admin protocol. */ public static List getOnlineRegions(final AdminService.BlockingInterface admin) throws IOException { return getOnlineRegions(null, admin); } - /** - * A helper to get the all the online regions on a region server using admin protocol. - * @return a list of online region info - */ + /** A helper to get the all the online regions on a region server using admin protocol. */ public static List getOnlineRegions( final RpcController controller, final AdminService.BlockingInterface admin) throws IOException { GetOnlineRegionRequest request = RequestConverter.buildGetOnlineRegionRequest(); @@ -1789,10 +1775,7 @@ public static List getOnlineRegions( return regionInfos; } - /** - * A helper to get the info of a region server using admin protocol. - * @return the server name - */ + /** A helper to get the info of a region server using admin protocol. */ public static ServerInfo getServerInfo(final RpcController controller, final AdminService.BlockingInterface admin) throws IOException { GetServerInfoRequest request = RequestConverter.buildGetServerInfoRequest(); @@ -1806,7 +1789,6 @@ public static ServerInfo getServerInfo(final RpcController controller, /** * A helper to get the list of files of a column family on a given region using admin protocol. - * @return the list of store files */ public static List getStoreFiles(final AdminService.BlockingInterface admin, final byte[] regionName, final byte[] family) throws IOException { @@ -1815,7 +1797,6 @@ public static List getStoreFiles(final AdminService.BlockingInterface ad /** * A helper to get the list of files of a column family on a given region using admin protocol. 
- * @return the list of store files */ public static List getStoreFiles(final RpcController controller, final AdminService.BlockingInterface admin, final byte[] regionName, final byte[] family) @@ -1844,9 +1825,7 @@ public static long getTotalRequestsCount(RegionLoad rl) { return rl.getReadRequestsCount() + rl.getWriteRequestsCount(); } - /** - * @param m Message to get delimited pb serialization of (with pb magic prefix) - */ + /** Convert a Message to delimited pb serialization of (with pb magic prefix) */ public static byte[] toDelimitedByteArray(final Message m) throws IOException { // Allocate arbitrary big size so we avoid resizing. ByteArrayOutputStream baos = new ByteArrayOutputStream(4096); @@ -2085,7 +2064,8 @@ public static RegionEventDescriptor toRegionEventDescriptor(EventType eventType, /** * Return short version of Message toString'd, shorter than TextFormat#shortDebugString. Tries to * NOT print out data both because it can be big but also so we do not have data in our logs. Use - * judiciously. n * @return toString of passed m + * judiciously. + * @return toString of passed m */ public static String getShortTextFormat(Message m) { if (m == null) return "null"; @@ -2231,19 +2211,13 @@ public static TableName[] getTableNameArray(List tableNam return tableNames; } - /** - * Convert a protocol buffer CellVisibility to a client CellVisibility n * @return the converted - * client CellVisibility - */ + /** Convert a protocol buffer CellVisibility to a client CellVisibility */ public static CellVisibility toCellVisibility(ClientProtos.CellVisibility proto) { if (proto == null) return null; return new CellVisibility(proto.getExpression()); } - /** - * Convert a protocol buffer CellVisibility bytes to a client CellVisibility n * @return the - * converted client CellVisibility n - */ + /** Convert a protocol buffer CellVisibility bytes to a client CellVisibility */ public static CellVisibility toCellVisibility(byte[] protoBytes) throws DeserializationException { if (protoBytes == null) return null; ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder(); @@ -2257,29 +2231,20 @@ public static CellVisibility toCellVisibility(byte[] protoBytes) throws Deserial return toCellVisibility(proto); } - /** - * Create a protocol buffer CellVisibility based on a client CellVisibility. n * @return a - * protocol buffer CellVisibility - */ + /** Create a protocol buffer CellVisibility based on a client CellVisibility. 
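The CellVisibility and Authorizations converters above are also matching client/protobuf pairs; a short round-trip sketch with a made-up visibility expression:

import org.apache.hadoop.hbase.security.visibility.CellVisibility;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;

public class CellVisibilityRoundTrip {
  public static void main(String[] args) {
    CellVisibility vis = new CellVisibility("(secret|topsecret)&!probationary");
    // Client CellVisibility -> protobuf -> client again.
    ClientProtos.CellVisibility pb = ProtobufUtil.toCellVisibility(vis);
    CellVisibility back = ProtobufUtil.toCellVisibility(pb);
    System.out.println(back.getExpression());
  }
}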
*/ public static ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVisibility) { ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder(); builder.setExpression(cellVisibility.getExpression()); return builder.build(); } - /** - * Convert a protocol buffer Authorizations to a client Authorizations n * @return the converted - * client Authorizations - */ + /** Convert a protocol buffer Authorizations to a client Authorizations */ public static Authorizations toAuthorizations(ClientProtos.Authorizations proto) { if (proto == null) return null; return new Authorizations(proto.getLabelList()); } - /** - * Convert a protocol buffer Authorizations bytes to a client Authorizations n * @return the - * converted client Authorizations n - */ + /** Convert a protocol buffer Authorizations bytes to a client Authorizations */ public static Authorizations toAuthorizations(byte[] protoBytes) throws DeserializationException { if (protoBytes == null) return null; ClientProtos.Authorizations.Builder builder = ClientProtos.Authorizations.newBuilder(); @@ -2293,10 +2258,7 @@ public static Authorizations toAuthorizations(byte[] protoBytes) throws Deserial return toAuthorizations(proto); } - /** - * Create a protocol buffer Authorizations based on a client Authorizations. n * @return a - * protocol buffer Authorizations - */ + /** Create a protocol buffer Authorizations based on a client Authorizations. */ public static ClientProtos.Authorizations toAuthorizations(Authorizations authorizations) { ClientProtos.Authorizations.Builder builder = ClientProtos.Authorizations.newBuilder(); for (String label : authorizations.getLabels()) { @@ -2305,10 +2267,7 @@ public static ClientProtos.Authorizations toAuthorizations(Authorizations author return builder.build(); } - /** - * Convert a protocol buffer TimeUnit to a client TimeUnit n * @return the converted client - * TimeUnit - */ + /** Convert a protocol buffer TimeUnit to a client TimeUnit */ public static TimeUnit toTimeUnit(final HBaseProtos.TimeUnit proto) { switch (proto) { case NANOSECONDS: @@ -2329,10 +2288,7 @@ public static TimeUnit toTimeUnit(final HBaseProtos.TimeUnit proto) { throw new RuntimeException("Invalid TimeUnit " + proto); } - /** - * Convert a client TimeUnit to a protocol buffer TimeUnit n * @return the converted protocol - * buffer TimeUnit - */ + /** Convert a client TimeUnit to a protocol buffer TimeUnit */ public static HBaseProtos.TimeUnit toProtoTimeUnit(final TimeUnit timeUnit) { switch (timeUnit) { case NANOSECONDS: @@ -2353,10 +2309,7 @@ public static HBaseProtos.TimeUnit toProtoTimeUnit(final TimeUnit timeUnit) { throw new RuntimeException("Invalid TimeUnit " + timeUnit); } - /** - * Convert a protocol buffer ThrottleType to a client ThrottleType n * @return the converted - * client ThrottleType - */ + /** Convert a protocol buffer ThrottleType to a client ThrottleType */ public static ThrottleType toThrottleType(final QuotaProtos.ThrottleType proto) { switch (proto) { case REQUEST_NUMBER: @@ -2382,10 +2335,7 @@ public static ThrottleType toThrottleType(final QuotaProtos.ThrottleType proto) } } - /** - * Convert a client ThrottleType to a protocol buffer ThrottleType n * @return the converted - * protocol buffer ThrottleType - */ + /** Convert a client ThrottleType to a protocol buffer ThrottleType */ public static QuotaProtos.ThrottleType toProtoThrottleType(final ThrottleType type) { switch (type) { case REQUEST_NUMBER: @@ -2411,10 +2361,7 @@ public static QuotaProtos.ThrottleType 
toProtoThrottleType(final ThrottleType ty } } - /** - * Convert a protocol buffer QuotaScope to a client QuotaScope n * @return the converted client - * QuotaScope - */ + /** Convert a protocol buffer QuotaScope to a client QuotaScope */ public static QuotaScope toQuotaScope(final QuotaProtos.QuotaScope proto) { switch (proto) { case CLUSTER: @@ -2425,10 +2372,7 @@ public static QuotaScope toQuotaScope(final QuotaProtos.QuotaScope proto) { throw new RuntimeException("Invalid QuotaScope " + proto); } - /** - * Convert a client QuotaScope to a protocol buffer QuotaScope n * @return the converted protocol - * buffer QuotaScope - */ + /** Convert a client QuotaScope to a protocol buffer QuotaScope */ public static QuotaProtos.QuotaScope toProtoQuotaScope(final QuotaScope scope) { switch (scope) { case CLUSTER: @@ -2439,10 +2383,7 @@ public static QuotaProtos.QuotaScope toProtoQuotaScope(final QuotaScope scope) { throw new RuntimeException("Invalid QuotaScope " + scope); } - /** - * Convert a protocol buffer QuotaType to a client QuotaType n * @return the converted client - * QuotaType - */ + /** Convert a protocol buffer QuotaType to a client QuotaType */ public static QuotaType toQuotaScope(final QuotaProtos.QuotaType proto) { switch (proto) { case THROTTLE: @@ -2453,10 +2394,7 @@ public static QuotaType toQuotaScope(final QuotaProtos.QuotaType proto) { throw new RuntimeException("Invalid QuotaType " + proto); } - /** - * Convert a client QuotaType to a protocol buffer QuotaType n * @return the converted protocol - * buffer QuotaType - */ + /** Convert a client QuotaType to a protocol buffer QuotaType */ public static QuotaProtos.QuotaType toProtoQuotaScope(final QuotaType type) { switch (type) { case THROTTLE: @@ -2468,11 +2406,7 @@ public static QuotaProtos.QuotaType toProtoQuotaScope(final QuotaType type) { } } - /** - * Converts a protocol buffer SpaceViolationPolicy to a client SpaceViolationPolicy. - * @param proto The protocol buffer space violation policy. - * @return The corresponding client SpaceViolationPolicy. - */ + /** Converts a protocol buffer SpaceViolationPolicy to a client SpaceViolationPolicy. */ public static SpaceViolationPolicy toViolationPolicy(final QuotaProtos.SpaceViolationPolicy proto) { switch (proto) { @@ -2488,11 +2422,7 @@ public static QuotaProtos.QuotaType toProtoQuotaScope(final QuotaType type) { throw new RuntimeException("Invalid SpaceViolationPolicy " + proto); } - /** - * Converts a client SpaceViolationPolicy to a protocol buffer SpaceViolationPolicy. - * @param policy The client SpaceViolationPolicy object. - * @return The corresponding protocol buffer SpaceViolationPolicy. - */ + /** Converts a client SpaceViolationPolicy to a protocol buffer SpaceViolationPolicy. */ public static QuotaProtos.SpaceViolationPolicy toProtoViolationPolicy(final SpaceViolationPolicy policy) { switch (policy) { @@ -2954,9 +2884,7 @@ public static RegionLoadStats createRegionLoadStats(ClientProtos.RegionLoadStats stats.getCompactionPressure()); } - /** - * n * @return A String version of the passed in msg - */ + /** Return a String version of the passed in msg */ public static String toText(Message msg) { return TextFormat.shortDebugString(msg); } @@ -2965,9 +2893,7 @@ public static byte[] toBytes(ByteString bs) { return bs.toByteArray(); } - /** - * Contain ServiceException inside here. Take a callable that is doing our pb rpc and run it. n - */ + /** Contain ServiceException inside. Take a callable that is doing our pb rpc and run it. 
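ProtobufUtil.call exists so callers of the blocking protobuf stubs don't have to unwrap ServiceException themselves. A hedged sketch of the intended pattern; the admin stub and controller are assumed to be obtained elsewhere:

import java.io.IOException;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;

public class CallExample {
  // ServiceException thrown by the blocking stub surfaces as IOException via call().
  static AdminProtos.GetServerInfoResponse serverInfo(
    AdminProtos.AdminService.BlockingInterface admin, RpcController controller)
    throws IOException {
    AdminProtos.GetServerInfoRequest request = RequestConverter.buildGetServerInfoRequest();
    return ProtobufUtil.call(() -> admin.getServerInfo(controller, request));
  }
}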
*/ public static T call(Callable callable) throws IOException { try { return callable.call(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java index addda9c59860..d2b1fb7ea078 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java @@ -706,6 +706,7 @@ public static GetRegionInfoRequest buildGetRegionInfoRequest(final byte[] region } /** + * Create a protocol buffer GetRegionInfo for all regions/regions of a table. * @param regionName the name of the region to get info * @param includeCompactionState indicate if the compaction state is requested * @param includeBestSplitRow indicate if the bestSplitRow is requested @@ -914,7 +915,7 @@ public static AddColumnRequest buildAddColumnRequest(final TableName tableName, public static DeleteColumnRequest buildDeleteColumnRequest(final TableName tableName, final byte[] columnName, final long nonceGroup, final long nonce) { DeleteColumnRequest.Builder builder = DeleteColumnRequest.newBuilder(); - builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); + builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setColumnName(UnsafeByteOperations.unsafeWrap(columnName)); builder.setNonceGroup(nonceGroup); builder.setNonce(nonce); @@ -927,7 +928,7 @@ public static DeleteColumnRequest buildDeleteColumnRequest(final TableName table public static ModifyColumnRequest buildModifyColumnRequest(final TableName tableName, final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) { ModifyColumnRequest.Builder builder = ModifyColumnRequest.newBuilder(); - builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); + builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setColumnFamilies(ProtobufUtil.toColumnFamilySchema(column)); builder.setNonceGroup(nonceGroup); builder.setNonce(nonce); @@ -939,7 +940,7 @@ public static ModifyColumnStoreFileTrackerRequest buildModifyColumnStoreFileTrac final long nonce) { ModifyColumnStoreFileTrackerRequest.Builder builder = ModifyColumnStoreFileTrackerRequest.newBuilder(); - builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); + builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setFamily(ByteString.copyFrom(family)); builder.setDstSft(dstSFT); builder.setNonceGroup(nonceGroup); @@ -1061,7 +1062,7 @@ public static EnableTableRequest buildEnableTableRequest(final TableName tableNa public static DisableTableRequest buildDisableTableRequest(final TableName tableName, final long nonceGroup, final long nonce) { DisableTableRequest.Builder builder = DisableTableRequest.newBuilder(); - builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); + builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setNonceGroup(nonceGroup); builder.setNonce(nonce); return builder.build(); @@ -1090,7 +1091,7 @@ public static CreateTableRequest buildCreateTableRequest(final TableDescriptor t public static ModifyTableRequest buildModifyTableRequest(final TableName tableName, final TableDescriptor tableDesc, final long nonceGroup, final long nonce) { ModifyTableRequest.Builder builder = ModifyTableRequest.newBuilder(); - builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); + 
builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setTableSchema(ProtobufUtil.toTableSchema(tableDesc)); builder.setNonceGroup(nonceGroup); builder.setNonce(nonce); @@ -1101,7 +1102,7 @@ public static ModifyTableStoreFileTrackerRequest buildModifyTableStoreFileTracke final TableName tableName, final String dstSFT, final long nonceGroup, final long nonce) { ModifyTableStoreFileTrackerRequest.Builder builder = ModifyTableStoreFileTrackerRequest.newBuilder(); - builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); + builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setDstSft(dstSFT); builder.setNonceGroup(nonceGroup); builder.setNonce(nonce); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/HBaseSnapshotException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/HBaseSnapshotException.java index 5fd62a86008c..4db5c7f1f3d5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/HBaseSnapshotException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/HBaseSnapshotException.java @@ -69,9 +69,7 @@ public HBaseSnapshotException(String message, Throwable cause) { super(message, cause); } - /** - * @return the description of the snapshot that is being failed - */ + /** Return the description of the snapshot that failed */ public SnapshotDescription getSnapshotDescription() { return this.description; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java index 7ebbbf44cebd..0cc8e6ad5f0d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java @@ -34,6 +34,7 @@ @InterfaceAudience.Private public class Writables { /** + * Write the writeable into a byte array and return it. * @param w writable * @return The bytes of w gotten by running its * {@link Writable#write(java.io.DataOutput)} method. 
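Writables.getBytes covers the write half of the Writable contract named in the javadoc above; assuming its usual companion Writables.getWritable for the read side, a round trip looks roughly like this (Hadoop's Text is used only as a convenient Writable):

import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Text;

public class WritablesRoundTrip {
  public static void main(String[] args) throws Exception {
    // Serialize a Writable to bytes, then populate a fresh instance from those bytes.
    byte[] bytes = Writables.getBytes(new Text("hello"));
    Text copy = new Text();
    Writables.getWritable(bytes, copy);
    System.out.println(copy); // hello
  }
}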
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java index 96170736208b..979094fda80b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java @@ -183,6 +183,8 @@ public void exec(ZooKeeper alwaysNull) { try { zk.close(); } catch (InterruptedException e) { + // Restore interrupt status + Thread.currentThread().interrupt(); } } if (ZKTask.this.delay(retryIntervalMs, maxRetries)) { @@ -303,6 +305,8 @@ private void closeZk() { try { zookeeper.close(); } catch (InterruptedException e) { + // Restore interrupt status + Thread.currentThread().interrupt(); } zookeeper = null; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetadata.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetadata.java index f0fae958a66a..b795ad45aa1c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetadata.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetadata.java @@ -54,7 +54,7 @@ public static byte[] appendMetaData(byte[] id, byte[] data) { pos = Bytes.putInt(newData, pos, idLength); pos = Bytes.putBytes(newData, pos, id, 0, id.length); pos = Bytes.putBytes(newData, pos, salt, 0, salt.length); - pos = Bytes.putBytes(newData, pos, data, 0, data.length); + Bytes.putBytes(newData, pos, data, 0, data.length); return newData; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java index 5d73504164ef..8f286b406697 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java @@ -133,9 +133,7 @@ public String toString() { .append(snapshotCleanupZNode).append("]").toString(); } - /** - * @return the znode string corresponding to a replicaId - */ + /** Return the znode string corresponding to a replicaId */ public String getZNodeForReplica(int replicaId) { if (RegionReplicaUtil.isDefaultReplica(replicaId)) { return joinZNode(baseZNode, metaZNodePrefix); @@ -165,16 +163,12 @@ public int getMetaReplicaIdFromZNode(String znode) { : Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1)); } - /** - * @return True if meta znode. - */ + /** Return true if meta znode. 
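For the ReadOnlyZKClient hunks just above: restoring the interrupt flag keeps the thread's interrupted status observable to callers instead of silently swallowing it. The general idiom, sketched outside HBase with a generic resource:

public final class InterruptExample {
  // Swallowing InterruptedException hides the interrupt from code further up the stack;
  // re-setting the flag preserves it, as the ReadOnlyZKClient change does.
  static void closeQuietly(AutoCloseable resource) {
    try {
      resource.close();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // restore interrupt status
    } catch (Exception e) {
      // ignore other close failures in this sketch
    }
  }
}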
*/ public boolean isMetaZNodePrefix(String znode) { return znode != null && znode.startsWith(this.metaZNodePrefix); } - /** - * @return True is the fully qualified path is for meta location - */ + /** Return true is the fully qualified path is for meta location */ public boolean isMetaZNodePath(String path) { int prefixLen = baseZNode.length() + 1; return path.length() > prefixLen && isMetaZNodePrefix(path.substring(prefixLen)); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java index db3ccebe89d2..e57967ae7211 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java @@ -45,7 +45,6 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.Cell.Type; import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -135,10 +134,11 @@ public Void answer(InvocationOnMock invocation) throws Throwable { case INCREMENT: ColumnValue value = req.getColumnValue(0); QualifierValue qvalue = value.getQualifierValue(0); - Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Type.Put) - .setRow(req.getRow().toByteArray()).setFamily(value.getFamily().toByteArray()) - .setQualifier(qvalue.getQualifier().toByteArray()) - .setValue(qvalue.getValue().toByteArray()).build(); + Cell cell = + CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Cell.Type.Put) + .setRow(req.getRow().toByteArray()).setFamily(value.getFamily().toByteArray()) + .setQualifier(qvalue.getQualifier().toByteArray()) + .setValue(qvalue.getValue().toByteArray()).build(); resp = MutateResponse.newBuilder() .setResult(ProtobufUtil.toResult(Result.create(Arrays.asList(cell)))).build(); break; @@ -496,8 +496,8 @@ public Void answer(InvocationOnMock invocation) throws Throwable { assertFalse("close scanner should not come in with scan priority " + scanPriority, req.hasCloseScanner() && req.getCloseScanner()); - Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Type.Put) - .setRow(Bytes.toBytes(scanNextCalled.incrementAndGet())) + Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) + .setType(Cell.Type.Put).setRow(Bytes.toBytes(scanNextCalled.incrementAndGet())) .setFamily(Bytes.toBytes("cf")).setQualifier(Bytes.toBytes("cq")) .setValue(Bytes.toBytes("v")).build(); Result result = Result.create(Arrays.asList(cell)); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java index 99e52361109c..f9b86221af1e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java @@ -54,7 +54,6 @@ import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.Cell.Type; import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -135,8 +134,8 @@ public Void answer(InvocationOnMock 
invocation) throws Throwable { if (req.hasCloseScanner() && req.getCloseScanner()) { done.run(ScanResponse.getDefaultInstance()); } else { - Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Type.Put) - .setRow(Bytes.toBytes(scanNextCalled.incrementAndGet())) + Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) + .setType(Cell.Type.Put).setRow(Bytes.toBytes(scanNextCalled.incrementAndGet())) .setFamily(Bytes.toBytes("cf")).setQualifier(Bytes.toBytes("cq")) .setValue(Bytes.toBytes("v")).build(); Result result = Result.create(Arrays.asList(cell)); @@ -183,10 +182,11 @@ public Void answer(InvocationOnMock invocation) throws Throwable { case INCREMENT: ColumnValue value = req.getColumnValue(0); QualifierValue qvalue = value.getQualifierValue(0); - Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Type.Put) - .setRow(req.getRow().toByteArray()).setFamily(value.getFamily().toByteArray()) - .setQualifier(qvalue.getQualifier().toByteArray()) - .setValue(qvalue.getValue().toByteArray()).build(); + Cell cell = + CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Cell.Type.Put) + .setRow(req.getRow().toByteArray()).setFamily(value.getFamily().toByteArray()) + .setQualifier(qvalue.getQualifier().toByteArray()) + .setValue(qvalue.getValue().toByteArray()).build(); resp = MutateResponse.newBuilder() .setResult(ProtobufUtil.toResult(Result.create(Arrays.asList(cell)))).build(); break; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCoprocessorDescriptor.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCoprocessorDescriptor.java index f1a8e000136d..a56c863ce614 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCoprocessorDescriptor.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCoprocessorDescriptor.java @@ -33,8 +33,6 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; @Category({ MiscTests.class, SmallTests.class }) public class TestCoprocessorDescriptor { @@ -43,8 +41,6 @@ public class TestCoprocessorDescriptor { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestCoprocessorDescriptor.class); - private static final Logger LOG = LoggerFactory.getLogger(TestCoprocessorDescriptor.class); - @Rule public TestName name = new TestName(); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java index 44a1c577b10d..69c33c833b0c 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java @@ -250,8 +250,7 @@ public void testDynamicFilter() throws Exception { public void testGetRowConstructor() { byte[] row1 = Bytes.toBytes("testRow"); byte[] row2 = Bytes.toBytes("testtestRow"); - ByteBuffer rowBuffer = ByteBuffer.allocate(16); - rowBuffer = ByteBuffer.wrap(row1); + ByteBuffer rowBuffer = ByteBuffer.wrap(row1); Get get1 = new Get(rowBuffer); Get get2 = new Get(row2, 4, 7); Assert.assertArrayEquals(get1.getRow(), get2.getRow()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java index dcb4d6eb88ad..718ca05b92f3 100644 --- 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java
@@ -23,7 +23,6 @@
 import java.io.IOException;
 import java.util.List;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.Cell.Type;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellUtil;
@@ -51,7 +50,7 @@ public void testAppendCopyConstructor() throws IOException {
     byte[] family = Bytes.toBytes("CF-01");
 
     origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(origin.getRow())
-      .setFamily(family).setQualifier(Bytes.toBytes("q")).setType(Type.Put)
+      .setFamily(family).setQualifier(Bytes.toBytes("q")).setType(Cell.Type.Put)
       .setValue(Bytes.toBytes(100)).build());
     origin.addColumn(family, Bytes.toBytes("q0"), Bytes.toBytes("value"));
     origin.setTimeRange(100, 1000);
@@ -89,7 +88,7 @@ public void testDeleteCopyConstructor() throws IOException {
     byte[] family = Bytes.toBytes("CF-01");
 
     origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(origin.getRow())
-      .setFamily(family).setQualifier(Bytes.toBytes("q")).setType(Type.Delete).build());
+      .setFamily(family).setQualifier(Bytes.toBytes("q")).setType(Cell.Type.Delete).build());
     origin.addColumn(family, Bytes.toBytes("q0"));
     origin.addColumns(family, Bytes.toBytes("q1"));
     origin.addFamily(family);
@@ -187,10 +186,11 @@ public void testAddImmutableToPut() throws IOException {
     Put put = new Put(row, true);
     put
       .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row).setFamily(family)
-        .setQualifier(qualifier0).setTimestamp(put.getTimestamp()).setType(Type.Put)
+        .setQualifier(qualifier0).setTimestamp(put.getTimestamp()).setType(Cell.Type.Put)
         .setValue(value0).build())
       .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row).setFamily(family)
-        .setQualifier(qualifier1).setTimestamp(ts1).setType(Type.Put).setValue(value1).build());
+        .setQualifier(qualifier1).setTimestamp(ts1).setType(Cell.Type.Put).setValue(value1)
+        .build());
 
     // Verify the cell of family:qualifier0
     Cell cell0 = put.get(family, qualifier0).get(0);
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java
index 0403ca647355..6e665dd55fc2 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java
@@ -87,8 +87,8 @@ private void checkDescriptiveNameEquality(String descriptiveNameForDisplay, Stri
     String firstPartOrig = origDesc.substring(0, origDesc.indexOf(Bytes.toStringBinary(startKey)));
     String secondPartOrig = origDesc.substring(
       origDesc.indexOf(Bytes.toStringBinary(startKey)) + Bytes.toStringBinary(startKey).length());
-    assert (firstPart.equals(firstPartOrig));
-    assert (secondPart.equals(secondPartOrig));
+    Assert.assertTrue(firstPart.equals(firstPartOrig));
+    Assert.assertTrue(secondPart.equals(secondPartOrig));
   }
 
   private void checkEquality(RegionInfo ri, Configuration conf) throws IOException {
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java
index 4023d745c065..4b124c68f862 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java
@@ -164,22 +164,17 @@ public void testNullQualifier() {
   @Test
   public void testSetAuthorizations() {
     Scan scan = new Scan();
-    try {
-      scan.setAuthorizations(new Authorizations("\u002b|\u0029"));
-      scan.setAuthorizations(new Authorizations("A", "B", "0123", "A0", "1A1", "_a"));
-      scan.setAuthorizations(new Authorizations("A|B"));
-      scan.setAuthorizations(new Authorizations("A&B"));
-      scan.setAuthorizations(new Authorizations("!B"));
-      scan.setAuthorizations(new Authorizations("A", "(A)"));
-      scan.setAuthorizations(new Authorizations("A", "{A"));
-      scan.setAuthorizations(new Authorizations(" "));
-      scan.setAuthorizations(new Authorizations(":B"));
-      scan.setAuthorizations(new Authorizations("-B"));
-      scan.setAuthorizations(new Authorizations(".B"));
-      scan.setAuthorizations(new Authorizations("/B"));
-    } catch (IllegalArgumentException e) {
-      fail("should not throw exception");
-    }
+    scan.setAuthorizations(new Authorizations("A", "B", "0123", "A0", "1A1", "_a"));
+    scan.setAuthorizations(new Authorizations("A|B"));
+    scan.setAuthorizations(new Authorizations("A&B"));
+    scan.setAuthorizations(new Authorizations("!B"));
+    scan.setAuthorizations(new Authorizations("A", "(A)"));
+    scan.setAuthorizations(new Authorizations("A", "{A"));
+    scan.setAuthorizations(new Authorizations(" "));
+    scan.setAuthorizations(new Authorizations(":B"));
+    scan.setAuthorizations(new Authorizations("-B"));
+    scan.setAuthorizations(new Authorizations(".B"));
+    scan.setAuthorizations(new Authorizations("/B"));
   }
 
   @Test
@@ -192,8 +187,7 @@ public void testSetStartRowAndSetStopRow() {
       scan.withStartRow(new byte[HConstants.MAX_ROW_LENGTH + 1]);
       fail("should've thrown exception");
     } catch (IllegalArgumentException iae) {
-    } catch (Exception e) {
-      fail("expected IllegalArgumentException to be thrown");
+      // Expected
     }
 
     scan.withStopRow(null);
@@ -203,8 +197,7 @@ public void testSetStartRowAndSetStopRow() {
       scan.withStopRow(new byte[HConstants.MAX_ROW_LENGTH + 1]);
       fail("should've thrown exception");
     } catch (IllegalArgumentException iae) {
-    } catch (Exception e) {
-      fail("expected IllegalArgumentException to be thrown");
+      // Expected
     }
   }
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java
index a7fcac95ee95..30fdbc367ff8 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java
@@ -102,6 +102,7 @@ private void testIllegalArgument(String key, long value) {
       new SimpleRequestController(conf);
       fail("The " + key + " must be bigger than zero");
     } catch (IllegalArgumentException e) {
+      // Expected
     }
   }
 
@@ -115,6 +116,7 @@ public long heapSize() {
   }
 
   @Test
+  @SuppressWarnings("ArrayAsKeyOfSetOrMap")
   public void testTaskCheckerHost() throws IOException {
     final int maxTotalConcurrentTasks = 100;
     final int maxConcurrentTasksPerServer = 2;
@@ -277,6 +279,7 @@ public void testSubmittedSizeChecker() {
   }
 
   @Test
+  @SuppressWarnings("ArrayAsKeyOfSetOrMap")
   public void testTaskCountChecker() throws InterruptedIOException {
     long heapSizeOfRow = 12345;
     int maxTotalConcurrentTasks = 100;
@@ -358,10 +361,8 @@ public void testWaitForMaximumCurrentTasks() throws Exception {
       try {
         barrier.await();
         controller.waitForMaximumCurrentTasks(max.get(), 123, 1, null);
-      } catch (InterruptedIOException e) {
+      } catch (InterruptedIOException | InterruptedException | BrokenBarrierException e) {
         Assert.fail(e.getMessage());
-      } catch (InterruptedException | BrokenBarrierException e) {
-        e.printStackTrace();
       }
     };
     // First test that our runnable thread only exits when tasks is zero.
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java
index 860544ba1350..0a888e838c1c 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java
@@ -27,7 +27,6 @@
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseCommonTestingUtil;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.filter.KeyOnlyFilter.KeyOnlyByteBufferExtendedCell;
 import org.apache.hadoop.hbase.filter.KeyOnlyFilter.KeyOnlyCell;
@@ -64,7 +63,8 @@ public void testKeyOnly() throws Exception {
     byte[] q = Bytes.toBytes("qual1");
     byte[] v = Bytes.toBytes("val1");
     byte[] tags = Bytes.toBytes("tag1");
-    KeyValue kv = new KeyValue(r, f, q, 0, q.length, 1234L, Type.Put, v, 0, v.length, tags);
+    KeyValue kv =
+      new KeyValue(r, f, q, 0, q.length, 1234L, KeyValue.Type.Put, v, 0, v.length, tags);
 
     ByteBuffer buffer = ByteBuffer.wrap(kv.getBuffer());
     ByteBufferKeyValue bbCell = new ByteBufferKeyValue(buffer, 0, buffer.remaining());
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java
index f9c93811b4eb..fccea923635a 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java
@@ -154,7 +154,7 @@ private static void timerTests(final CellBlockBuilder builder, final int count,
     StopWatch timer = new StopWatch();
     timer.start();
     for (int i = 0; i < cycles; i++) {
-      timerTest(builder, timer, count, size, codec, compressor, false);
+      timerTest(builder, count, size, codec, compressor, false);
     }
     timer.stop();
     LOG.info("Codec=" + codec + ", compression=" + compressor + ", sized=" + false + ", count="
@@ -162,16 +162,15 @@ private static void timerTests(final CellBlockBuilder builder, final int count,
     timer.reset();
     timer.start();
     for (int i = 0; i < cycles; i++) {
-      timerTest(builder, timer, count, size, codec, compressor, true);
+      timerTest(builder, count, size, codec, compressor, true);
     }
     timer.stop();
     LOG.info("Codec=" + codec + ", compression=" + compressor + ", sized=" + true + ", count="
       + count + ", size=" + size + ", + took=" + timer.getTime() + "ms");
   }
 
-  private static void timerTest(final CellBlockBuilder builder, final StopWatch timer,
-    final int count, final int size, final Codec codec, final CompressionCodec compressor,
-    final boolean sized) throws IOException {
+  private static void timerTest(final CellBlockBuilder builder, final int count, final int size,
+    final Codec codec, final CompressionCodec compressor, final boolean sized) throws IOException {
     doBuildCellBlockUndoCellBlock(builder, codec, compressor, count, size, sized);
   }
 
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java
index c327896f72ab..67a8d15c1d02 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java
@@ -121,7 +121,7 @@ public void testWrapConnectionException() throws Exception {
   }
 
   @Test
-  public void testExecute() throws IOException {
+  public void testExecute() throws Exception {
     EventLoop eventLoop = new DefaultEventLoop();
     MutableInt executed = new MutableInt(0);
     MutableInt numStackTraceElements = new MutableInt(0);
@@ -156,7 +156,7 @@ public void run() {
       });
       FutureUtils.get(future);
     } finally {
-      eventLoop.shutdownGracefully();
+      eventLoop.shutdownGracefully().get();
     }
   }
 }
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java
index a9c40fd3bb79..73f3653a9026 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java
@@ -20,6 +20,7 @@
 import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
@@ -69,9 +70,8 @@ public static void tearDown() throws IOException {
 
   @Test
   public void testPrivateMethodExecutedInEventLoop() throws IllegalAccessException {
-    // make sure the test is executed with "-ea"
     assertThrows(AssertionError.class, () -> {
-      assert false;
+      assertTrue(false);
     });
     for (Method method : NettyRpcConnection.class.getDeclaredMethods()) {
       if (Modifier.isPrivate(method.getModifiers()) && !method.getName().contains("$")) {
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaGlobalsSettingsBypass.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaGlobalsSettingsBypass.java
index e2843180938e..a3c96eb45aa9 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaGlobalsSettingsBypass.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaGlobalsSettingsBypass.java
@@ -143,6 +143,7 @@ void expectFailure(QuotaSettings one, QuotaSettings two) throws IOException {
       one.merge(two);
       fail("Expected to see an Exception merging " + two + " into " + one);
     } catch (IllegalArgumentException e) {
+      // Expected
     }
   }
 }
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java
index 8d82ba538bda..b2758b408d8b 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java
@@ -140,14 +140,8 @@ public void testDigestSaslClientCallbackHandlerWithException() {
   @Test
   public void testHBaseSaslRpcClientCreation() throws Exception {
     // creation kerberos principal check section
-    assertFalse(assertSuccessCreationKerberosPrincipal(null));
-    assertFalse(assertSuccessCreationKerberosPrincipal("DOMAIN.COM"));
-    assertFalse(assertSuccessCreationKerberosPrincipal("principal/DOMAIN.COM"));
-    if (!assertSuccessCreationKerberosPrincipal("principal/localhost@DOMAIN.COM")) {
-      // XXX: This can fail if kerberos support in the OS is not sane, see HBASE-10107.
-      // For now, don't assert, just warn
-      LOG.warn("Could not create a SASL client with valid Kerberos credential");
-    }
+    // Note this is mocked in a way that doesn't care about principal names
+    assertFalse(assertSuccessCreationKerberos());
 
     // creation digest principal check section
     assertFalse(assertSuccessCreationDigestPrincipal(null, null));
@@ -157,9 +151,8 @@ public void testHBaseSaslRpcClientCreation() throws Exception {
     assertTrue(assertSuccessCreationDigestPrincipal(DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD));
 
     // creation simple principal check section
-    assertFalse(assertSuccessCreationSimplePrincipal("", ""));
-    assertFalse(assertSuccessCreationSimplePrincipal(null, null));
-    assertFalse(assertSuccessCreationSimplePrincipal(DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD));
+    // Note this is mocked in a way that doesn't care about principal names
+    assertFalse(assertSuccessCreationSimple());
 
     // exceptions check section
     assertTrue(assertIOExceptionThenSaslClientIsNull(DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD));
@@ -247,10 +240,11 @@ public SaslClient createClient(Configuration conf, InetAddress serverAddress,
     }
   }
 
-  private boolean assertSuccessCreationKerberosPrincipal(String principal) {
+  private boolean assertSuccessCreationKerberos() {
     HBaseSaslRpcClient rpcClient = null;
     try {
-      rpcClient = createSaslRpcClientForKerberos(principal);
+      // createSaslRpcClientForKerberos is mocked in a way that doesn't care about principal names
+      rpcClient = createSaslRpcClientForKerberos();
     } catch (Exception ex) {
       LOG.error(ex.getMessage(), ex);
     }
@@ -270,17 +264,17 @@ private boolean assertSuccessCreationDigestPrincipal(String principal, String pa
     return rpcClient != null;
   }
 
-  private boolean assertSuccessCreationSimplePrincipal(String principal, String password) {
+  private boolean assertSuccessCreationSimple() {
     HBaseSaslRpcClient rpcClient = null;
     try {
-      rpcClient = createSaslRpcClientSimple(principal, password);
+      rpcClient = createSaslRpcClientSimple();
     } catch (Exception ex) {
       LOG.error(ex.getMessage(), ex);
    }
     return rpcClient != null;
   }
 
-  private HBaseSaslRpcClient createSaslRpcClientForKerberos(String principal) throws IOException {
+  private HBaseSaslRpcClient createSaslRpcClientForKerberos() throws IOException {
     return new HBaseSaslRpcClient(HBaseConfiguration.create(),
       new GssSaslClientAuthenticationProvider(), createTokenMock(),
       Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false);
@@ -296,8 +290,7 @@ private Token createTokenMockWithCredentials(String p
     return token;
   }
 
-  private HBaseSaslRpcClient createSaslRpcClientSimple(String principal, String password)
-    throws IOException {
+  private HBaseSaslRpcClient createSaslRpcClientSimple() throws IOException {
     return new HBaseSaslRpcClient(HBaseConfiguration.create(),
       new SimpleSaslClientAuthenticationProvider(), createTokenMock(),
       Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false);
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java
index d7ce6265b8c5..04ba2570eac8 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java
@@ -120,10 +120,10 @@ public void testMultiThreadedRoundRobin() throws ExecutionException, Interrupted
           String value = Integer.toString(id.getAndIncrement());
           String result = poolMap.getOrCreate(key, () -> value);
          results.add(result);
-
-          Thread.yield();
+          // Sleep for a short time to ensure a yield. Thread#yield has platform dependent behavior.
+          Thread.sleep(10);
         }
-      } catch (IOException e) {
+      } catch (Exception e) {
         throw new CompletionException(e);
       }
     };
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java
index 45f533f1a730..d83944ef7f3a 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java
@@ -95,10 +95,10 @@ public void testLocality() throws ExecutionException, InterruptedException {
       for (int i = 0; i < 3; i++) {
         String result = poolMap.getOrCreate(key, () -> myId);
         assertEquals(myId, result);
-
-        Thread.yield();
+        // Sleep for a short period of time to yield. Thread#yield is platform dependent.
+        Thread.sleep(10);
       }
-    } catch (IOException e) {
+    } catch (Exception e) {
       throw new CompletionException(e);
     }
   };