diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bd0a12edd93..42ef94a0b36 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -218,7 +218,7 @@ jobs: distribution: 'temurin' java-version: ${{ matrix.java }} - name: Compile Ozone using Java ${{ matrix.java }} - run: hadoop-ozone/dev-support/checks/build.sh -Pdist -Dskip.npx -Dskip.installnpx -Djavac.version=${{ matrix.java }} ${{ inputs.ratis_args }} + run: hadoop-ozone/dev-support/checks/build.sh -Pdist -Dskip.npx -Dskip.installnpx -Dmaven.javadoc.failOnWarnings=${{ matrix.java != 8 }} -Djavac.version=${{ matrix.java }} ${{ inputs.ratis_args }} env: OZONE_WITH_COVERAGE: false DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java index 3e78abbf485..e3f7f043a9e 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ExtendedInputStream.java @@ -75,7 +75,6 @@ public synchronized int read(ByteBuffer byteBuffer) throws IOException { * readWithStrategy implementation, as it will never be called by the tests. * * @param strategy - * @return * @throws IOException */ protected abstract int readWithStrategy(ByteReaderStrategy strategy) throws diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java index 6342de2c338..83abb937b03 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java @@ -152,7 +152,6 @@ protected int calculateExpectedDataBlocks(ECReplicationConfig rConfig) { * Using the current position, returns the index of the blockStream we should * be reading from. This is the index in the internal array holding the * stream reference. The block group index will be one greater than this. - * @return */ protected int currentStreamIndex() { return (int)((position / ecChunkSize) % repConfig.getData()); @@ -206,7 +205,6 @@ protected BlockExtendedInputStream getOrOpenStream(int locationIndex) throws IOE * to the replicaIndex given based on the EC pipeline fetched from SCM. * @param replicaIndex * @param refreshFunc - * @return */ protected Function ecPipelineRefreshFunction( int replicaIndex, Function refreshFunc) { @@ -241,7 +239,6 @@ protected Function ecPipelineRefreshFunction( * potentially partial last stripe. Note that the internal block index is * numbered starting from 1. * @param index - Index number of the internal block, starting from 1 - * @return */ protected long internalBlockLength(int index) { long lastStripe = blockInfo.getLength() % stripeSize; @@ -344,7 +341,6 @@ protected boolean shouldRetryFailedRead(int failedIndex) { * strategy buffer. This call may read from several internal BlockInputStreams * if there is sufficient space in the buffer. * @param strategy - * @return * @throws IOException */ @Override @@ -409,7 +405,6 @@ protected void seekStreamIfNecessary(BlockExtendedInputStream stream, * group length. 
* @param stream Stream to read from * @param strategy The ReaderStrategy to read data into - * @return * @throws IOException */ private int readFromStream(BlockExtendedInputStream stream, diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java index 794b972f150..662f4b39640 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java @@ -650,7 +650,7 @@ public static File createDir(String dirPath) { * Utility string formatter method to display SCM roles. * * @param nodes - * @return + * @return String */ public static String format(List nodes) { StringBuilder sb = new StringBuilder(); @@ -680,7 +680,8 @@ public static int roundupMb(long bytes) { /** * Unwrap exception to check if it is some kind of access control problem - * ({@link AccessControlException} or {@link SecretManager.InvalidToken}) + * ({@link org.apache.hadoop.security.AccessControlException} or + * {@link org.apache.hadoop.security.token.SecretManager.InvalidToken}) * or a RpcException. */ public static Throwable getUnwrappedException(Exception ex) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java index c176ad1464e..20755a6e0ec 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java @@ -97,7 +97,6 @@ public static JsonNode getBeansJsonNode(String metricsJson) throws IOException { * Returns the number of decommissioning nodes. * * @param jsonNode - * @return */ public static int getNumDecomNodes(JsonNode jsonNode) { int numDecomNodes; @@ -118,7 +117,6 @@ public static int getNumDecomNodes(JsonNode jsonNode) { * @param numDecomNodes * @param countsMap * @param errMsg - * @return * @throws IOException */ @Nullable diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java index 1427fbf5873..2ca3e947438 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java @@ -396,7 +396,6 @@ StartContainerBalancerResponseProto startContainerBalancer( * Force generates new secret keys (rotate). * * @param force boolean flag that forcefully rotates the key on demand - * @return * @throws IOException */ boolean rotateSecretKeys(boolean force) throws IOException; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java index df8e9d45e13..45bc77d1d8f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java @@ -146,7 +146,6 @@ public long getReportTimeStamp() { /** * Return a map of all stats and their value as a long. 
- * @return */ public Map getStats() { Map result = new HashMap<>(); @@ -159,7 +158,6 @@ public Map getStats() { /** * Return a map of all samples, with the stat as the key and the samples * for the stat as a List of Long. - * @return */ public Map> getSamples() { Map> result = new HashMap<>(); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java index a5e443a598d..66fe7d18783 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java @@ -67,7 +67,6 @@ public class SCMNodeInfo { /** * Build SCM Node information from configuration. * @param conf - * @return */ public static List buildNodeInfo(ConfigurationSource conf) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java index 332dddac25c..779f2456be6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java @@ -307,10 +307,13 @@ public void remove(Node node) { * @param loc string location of a node. If loc starts with "/", it's a * absolute path, otherwise a relative path. Following examples * are all accepted, + *
+   *            <pre>
+   *            {@code
    *            1.  /dc1/rm1/rack1          -> an inner node
    *            2.  /dc1/rm1/rack1/node1    -> a leaf node
    *            3.  rack1/node1             -> a relative path to this node
-   *
+   *            }
+   *            </pre>
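To make the accepted location formats above concrete, here is a minimal standalone sketch of the absolute-vs-relative resolution the javadoc describes (the resolve helper and class name are hypothetical; the real lookup lives in InnerNodeImpl):

```java
public class NodeLocationSketch {
  // Locations starting with "/" are absolute; anything else is taken
  // as relative to the current inner node's path.
  static String resolve(String currentPath, String loc) {
    return loc.startsWith("/") ? loc : currentPath + "/" + loc;
  }

  public static void main(String[] args) {
    System.out.println(resolve("/dc1/rm1", "rack1/node1"));    // /dc1/rm1/rack1/node1 -> a leaf node
    System.out.println(resolve("/dc1/rm1", "/dc1/rm1/rack1")); // absolute path, used as-is
  }
}
```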
* @return null if the node is not found */ @Override diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java index 1486f05f55c..54a32e9c340 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java @@ -243,7 +243,6 @@ public int getReplicaIndex(DatanodeDetails dn) { /** * Get the replicaIndex Map. - * @return */ public Map getReplicaIndexes() { return this.getNodes().stream().collect(Collectors.toMap(Function.identity(), this::getReplicaIndex)); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java index 31aaca568e4..66685b4bbbd 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java @@ -131,7 +131,7 @@ public static InetSocketAddress updateListenAddress(OzoneConfiguration conf, * Fall back to OZONE_METADATA_DIRS if not defined. * * @param conf - * @return + * @return File */ public static File getScmDbDir(ConfigurationSource conf) { File metadataDir = getDirectoryFromConfig(conf, diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakDetector.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakDetector.java index 477a291f928..9579d4e73bf 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakDetector.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LeakDetector.java @@ -31,7 +31,6 @@ /** * Simple general resource leak detector using {@link ReferenceQueue} and {@link java.lang.ref.WeakReference} to * observe resource object life-cycle and assert proper resource closure before they are GCed. - * *
* <p>
* Example usage: * @@ -43,16 +42,18 @@ * // report leaks, don't refer to the original object (MyResource) here. * System.out.println("MyResource is not closed before being discarded."); * }); - * - * @Override + * } + * } + * + *
+ * <pre>
+ *   {@code @Override
  *   public void close() {
  *     // proper resources cleanup...
  *     // inform tracker that this object is closed properly.
  *     leakTracker.close();
  *   }
- * }
- *
- * }</pre>
+ * } + * */ public class LeakDetector { private static final Logger LOG = LoggerFactory.getLogger(LeakDetector.class); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java index dff0b015ed5..8d6f3c32e53 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java @@ -23,9 +23,9 @@ import java.io.IOException; /** - * A {@link Codec} to serialize/deserialize objects by delegation. + * A {@link org.apache.hadoop.hdds.utils.db.Codec} to serialize/deserialize objects by delegation. * - * @param The object type of this {@link Codec}. + * @param The object type of this {@link org.apache.hadoop.hdds.utils.db.Codec}. * @param The object type of the {@link #delegate}. */ public class DelegatedCodec implements Codec { @@ -53,8 +53,8 @@ public enum CopyType { * Construct a {@link Codec} using the given delegate. * * @param delegate the delegate {@link Codec} - * @param forward a function to convert {@link DELEGATE} to {@link T}. - * @param backward a function to convert {@link T} back to {@link DELEGATE}. + * @param forward a function to convert {@code DELEGATE} to {@code T}. + * @param backward a function to convert {@code T} back to {@code DELEGATE}. * @param copyType How to {@link #copyObject(Object)}? */ public DelegatedCodec(Codec delegate, diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java index 676216465b3..a24d39e5dac 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java @@ -39,8 +39,7 @@ static ChunkBuffer allocate(int capacity) { return allocate(capacity, 0); } - /** - * Similar to {@link ByteBuffer#allocate(int)} + /** Similar to {@link ByteBuffer#allocate(int)} * except that it can specify the increment. * * @param increment diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java index fdf40af9e09..832ab40d30f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java @@ -27,7 +27,7 @@ /** * Helper class to convert between protobuf lists and Java lists of - * {@link ContainerProtos.ChunkInfo} objects. + * {@link org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo} objects. *
* <p>
* This class is immutable. */ @@ -49,7 +49,7 @@ public ChunkInfoList(List chunks) { } /** - * @return A new {@link ChunkInfoList} created from protobuf data. + * @return A new {@link #ChunkInfoList} created from protobuf data. */ public static ChunkInfoList getFromProtoBuf( ContainerProtos.ChunkInfoList chunksProto) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java index 76fc404361a..b94dd024b2d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionInstanceFactory.java @@ -69,7 +69,7 @@ public class LayoutVersionInstanceFactory { /** * Register an instance with a given factory key (key + version). * For safety reasons we dont allow (1) re-registering, (2) registering an - * instance with version > SLV. + * instance with version > SLV. * * @param lvm LayoutVersionManager * @param key VersionFactoryKey key to associate with instance. @@ -136,13 +136,15 @@ private boolean isValid(LayoutVersionManager lvm, int version) { } /** + *
+   * <pre>
    * From the list of versioned instances for a given "key", this
    * returns the "floor" value corresponding to the given version.
-   * For example, if we have key = "CreateKey",  entry -> [(1, CreateKeyV1),
-   * (3, CreateKeyV2), and if the passed in key = CreateKey and version = 2, we
+   * For example, if we have key = "CreateKey",  entry -&gt; [(1, CreateKeyV1),
+   * (3, CreateKeyV2), and if the passed in key = CreateKey &amp; version = 2, we
    * return CreateKeyV1.
    * Since this is a priority queue based implementation, we use a O(1) peek()
    * lookup to get the current valid version.
+   * 
* @param lvm LayoutVersionManager * @param key Key and Version. * @return instance. diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionManager.java index 3137d756e6b..a765c2c9455 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionManager.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutVersionManager.java @@ -74,7 +74,6 @@ public interface LayoutVersionManager { /** * Generic API for returning a registered handler for a given type. * @param type String type - * @return */ default Object getHandler(String type) { return null; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/UpgradeFinalizer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/UpgradeFinalizer.java index 44ae94870e3..19c0498aa7a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/UpgradeFinalizer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/UpgradeFinalizer.java @@ -50,14 +50,14 @@ public interface UpgradeFinalizer { * Represents the current state in which the service is with regards to * finalization after an upgrade. * The state transitions are the following: - * ALREADY_FINALIZED - no entry no exit from this status without restart. + * {@code ALREADY_FINALIZED} - no entry no exit from this status without restart. * After an upgrade: - * FINALIZATION_REQUIRED -(finalize)-> STARTING_FINALIZATION - * -> FINALIZATION_IN_PROGRESS -> FINALIZATION_DONE from finalization done + * {@code FINALIZATION_REQUIRED -(finalize)-> STARTING_FINALIZATION + * -> FINALIZATION_IN_PROGRESS -> FINALIZATION_DONE} from finalization done * there is no more move possible, after a restart the service can end up in: - * - FINALIZATION_REQUIRED, if the finalization failed and have not reached - * FINALIZATION_DONE, - * - or it can be ALREADY_FINALIZED if the finalization was successfully done. + * {@code FINALIZATION_REQUIRED}, if the finalization failed and have not reached + * {@code FINALIZATION_DONE}, + * - or it can be {@code ALREADY_FINALIZED} if the finalization was successfully done. */ enum Status { ALREADY_FINALIZED, diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/VersionFactoryKey.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/VersionFactoryKey.java index bda45f5a745..6465cc85501 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/VersionFactoryKey.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/VersionFactoryKey.java @@ -20,7 +20,7 @@ /** * "Key" element to the Version specific instance factory. Currently it has 2 - * dimensions -> a 'key' string and a version. This is to support a factory + * dimensions -> a 'key' string and a version. This is to support a factory * which returns an instance for a given "key" and "version". 
*/ public class VersionFactoryKey { diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java index b1a20c9aecb..0d6c0c90878 100644 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java +++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java @@ -108,7 +108,7 @@ default String[] getTrimmedStrings(String name) { /** * Gets the configuration entries where the key contains the prefix. This * method will strip the prefix from the key in the return Map. - * Example: somePrefix.key->value will be key->value in the returned map. + * Example: {@code somePrefix.key->value} will be {@code key->value} in the returned map. * @param keyPrefix Prefix to search. * @return Map containing keys that match and their values. */ diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java index d6ca2d120e6..2e11cde3d9e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java @@ -35,7 +35,7 @@ /** * Map: containerId {@literal ->} (localId {@literal ->} {@link BlockData}). * The outer container map does not entail locking for a better performance. - * The inner {@link BlockDataMap} is synchronized. + * The inner {@code BlockDataMap} is synchronized. * * This class will maintain list of open keys per container when closeContainer * command comes, it should autocommit all open keys of a open container before diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java index c584ba79037..bb47b5b9b6f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicyTemplate.java @@ -90,7 +90,7 @@ public final List chooseContainerForBlockDeletion( /** * Abstract step for ordering the container data to be deleted. 
* Subclass need to implement the concrete ordering implementation - * in descending order (more prioritized -> less prioritized) + * in descending order (more prioritized -> less prioritized) * @param candidateContainers candidate containers to be ordered */ protected abstract void orderByDescendingPriority( diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java index d02bae0a35a..f075b6f67ca 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java @@ -75,7 +75,6 @@ void validateContainerCommand( /** * Returns the handler for the specified containerType. * @param containerType - * @return */ Handler getHandler(ContainerProtos.ContainerType containerType); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index 9dc6af19353..873096a024d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -705,9 +705,9 @@ private ExecutorService getChunkExecutor(WriteChunkRequestProto req) { } /** - * {@link #writeStateMachineData(ContainerCommandRequestProto, long, long, long)} + * {@link #writeStateMachineData} * calls are not synchronized with each other - * and also with {@link #applyTransaction(TransactionContext)}. + * and also with {@code applyTransaction(TransactionContext)}. */ @Override public CompletableFuture write(LogEntryProto entry, TransactionContext trx) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java index 2172c07c3a2..3d1be9791ec 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java @@ -57,7 +57,7 @@ * | fsAvail |-------other-----------| * |<- fsCapacity ->| * } - * + *
  * What we could directly get from local fs:
  *     fsCapacity, fsAvail, (fsUsed = fsCapacity - fsAvail)
  * We could get from config:
@@ -80,13 +80,13 @@
  * then we should use DedicatedDiskSpaceUsage for
  * `hdds.datanode.du.factory.classname`,
  * Then it is much simpler, since we don't care about other usage:
- * <pre>
  * {@code
  *  |----used----|             (avail)/fsAvail              |
  *  |<-              capacity/fsCapacity                  ->|
- *  }
- * </pre>
+ * } + * * We have avail == fsAvail. + *
*/ public final class VolumeInfo { @@ -157,14 +157,14 @@ public long getCapacity() { } /** - * Calculate available space use method A. *
    * {@code
+   * Calculate available space use method A.
    * |----used----|   (avail)   |++++++++reserved++++++++|
    * |<-     capacity         ->|
-   * }
-   *
* A) avail = capacity - used + * } + * */ public long getAvailable() { return usage.getAvailable(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java index a91f0c1f72a..733dc7964f1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java @@ -82,7 +82,7 @@ public long getUsedSpace() { * Calculate available space use method B. * |----used----| (avail) |++++++++reserved++++++++| * | fsAvail |-------other-------| - * ->|~~~~|<- + * ->|~~~~|<- * remainingReserved * } * diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java index 98d81c15d0a..ae3288a3e98 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java @@ -932,7 +932,6 @@ private ContainerReplicaProto.State getHddsState() /** * Returns container DB file. - * @return */ public File getContainerDBFile() { return KeyValueContainerLocationUtil.getContainerDBFile(containerData); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java index 47d4f3f9e70..4ea8552e780 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java @@ -430,7 +430,6 @@ public KeyPrefixFilter getDeletingBlockKeyFilter() { /** * Schema v3 use a prefix as startKey, * for other schemas just return null. - * @return */ public String startKeyEmpty() { if (hasSchema(SCHEMA_V3)) { @@ -442,7 +441,6 @@ public String startKeyEmpty() { /** * Schema v3 use containerID as key prefix, * for other schemas just return null. - * @return */ public String containerPrefix() { if (hasSchema(SCHEMA_V3)) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java index 7773b54f794..945efbcf6ea 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java @@ -99,7 +99,6 @@ public static DatanodeStore getUncachedDatanodeStore( * opened by this thread, the other thread will get a RocksDB exception. 
* @param containerData The container data * @param conf Configuration - * @return * @throws IOException */ public static DatanodeStore getUncachedDatanodeStore( diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java index 288a2d3e331..aa5d52f3cee 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java @@ -46,7 +46,6 @@ private ChunkManagerFactory() { * @param conf Configuration * @param manager This parameter will be used only for read data of * FILE_PER_CHUNK layout file. Can be null for other cases. - * @return */ public static ChunkManager createChunkManager(ConfigurationSource conf, BlockManager manager, VolumeSet volSet) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java index 8df856d4b93..601e7b2712c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java @@ -99,7 +99,9 @@ public void setLinked() { linked.set(true); } - /** @return true iff {@link StateMachine.DataChannel} is already linked. */ + /** + * @return true if {@link org.apache.ratis.statemachine.StateMachine.DataChannel} is already linked. + */ public boolean cleanUp() { if (linked.get()) { // already linked, nothing to do. diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java index 26719d7f035..88aeb3c174d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java @@ -300,9 +300,9 @@ protected static void checkTableStatus(Table table, String name) /** * Block Iterator for KeyValue Container. This block iterator returns blocks - * which match with the {@link MetadataKeyFilters.KeyPrefixFilter}. If no + * which match with the {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter}. If no * filter is specified, then default filter used is - * {@link MetadataKeyFilters#getUnprefixedKeyFilter()} + * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters#getUnprefixedKeyFilter()} */ @InterfaceAudience.Public public static class KeyValueBlockIterator implements @@ -405,9 +405,9 @@ public void close() throws IOException { /** * Block localId Iterator for KeyValue Container. * This Block localId iterator returns localIds - * which match with the {@link MetadataKeyFilters.KeyPrefixFilter}. If no + * which match with the {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter}. 
If no * filter is specified, then default filter used is - * {@link MetadataKeyFilters#getUnprefixedKeyFilter()} + * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters#getUnprefixedKeyFilter()} */ @InterfaceAudience.Public public static class KeyValueBlockLocalIdIterator implements diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java index 4beb2075432..1be5a3819c8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java @@ -25,7 +25,8 @@ import java.io.IOException; /** - * Codec for parsing {@link ContainerProtos.ChunkInfoList} objects from data + * Codec for parsing {@link org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfoList} + * objects from data * that may have been written using schema version one. Before upgrading * schema versions, deleted block IDs were stored with a duplicate copy of * their ID as the value in the database. After upgrading the code, any diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/StreamingSource.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/StreamingSource.java index 5fdfc931b99..e49f3c3d6e5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/StreamingSource.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/StreamingSource.java @@ -27,9 +27,9 @@ public interface StreamingSource { /** * - * @param id: custom identifier + * @param id custom identifier * - * @return map of files which should be copied (logical name -> real path) + * @return map of files which should be copied (logical name -> real path) */ Map getFilesToStream(String id) throws InterruptedException; diff --git a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml index 3a69c793c26..288085ef948 100644 --- a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml +++ b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml @@ -88,6 +88,7 @@ + diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java index cbb4f3fc2ee..0cb39482e98 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java @@ -118,7 +118,6 @@ String getSCMCertificate(ScmNodeDetailsProto scmNodeDetails, /** * Get Root CA certificate. 
- * @return * @throws IOException */ String getRootCACertificate() throws IOException; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java index a938d53c7c4..71918308f14 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java @@ -330,7 +330,6 @@ public SCMGetCertResponseProto getCACert() throws IOException { * @param role - node type: OM/SCM/DN. * @param startSerialId - start cert serial id. * @param count - max number of certificates returned in a batch. - * @return * @throws IOException */ @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java index e7e029f7087..da651160d04 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java @@ -50,8 +50,7 @@ void verify(Token token, ContainerCommandRequestProtoOrBuilder cmd) throws SCMSecurityException; - /** Same as {@link #verify(Token, - * ContainerCommandRequestProtoOrBuilder)}, but with encoded token. */ + /** Same as {@link #verify}, but with encoded token. */ default void verify(ContainerCommandRequestProtoOrBuilder cmd, String encodedToken) throws SCMSecurityException { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java index b277a759cb8..42292b9663f 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java @@ -668,6 +668,8 @@ protected enum InitCase { * certificate. * * Truth table: + *
+   * <pre>
+   * {@code
    *  +--------------+---------------+--------------+---------------------+
    *  | Private Key  | Public Keys   | Certificate  |   Result            |
    *  +--------------+---------------+--------------+---------------------+
@@ -680,7 +682,8 @@ protected enum InitCase {
    *  | True   (1)   | True    (1)   | False  (0)   |   GETCERT->SUCCESS  |
    *  | True   (1)   | True    (1)   | True   (1)   |   SUCCESS           |
    *  +--------------+-----------------+--------------+----------------+
-   *
+   * }
+   * </pre>
* Success in following cases: * 1. If keypair as well certificate is available. * 2. If private key and certificate is available and public key is @@ -1083,7 +1086,7 @@ public Duration timeBeforeExpiryGracePeriod(X509Certificate certificate) { * Renew keys and certificate. Save the keys are certificate to disk in new * directories, swap the current key directory and certs directory with the * new directories. - * @param force, check certificate expiry time again if force is false. + * @param force check certificate expiry time again if force is false. * @return String, new certificate ID * */ public String renewAndStoreKeyAndCertificate(boolean force) diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java index f27f42e0b4c..9d037fed6bc 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java @@ -123,8 +123,8 @@ /** * Create a Jetty embedded server to answer http requests. The primary goal is * to serve up status information for the server. There are three contexts: - * "/logs/" -> points to the log directory "/static/" -> points to common static - * files (src/webapps/static) "/" -> the jsp server code from + * "/logs/" -> points to the log directory "/static/" -> points to common static + * files (src/webapps/static) "/" -> the jsp server code from * (src/webapps/) * * This class is a fork of the old HttpServer. HttpServer exists for diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java index 50925fcac7c..bceec92c6c8 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java @@ -80,7 +80,7 @@ * curl "http://localhost:10002/prof" * - To collect 1 minute CPU profile of current process and output in tree * format (html) - * curl{@literal "http://localhost:10002/prof?output=tree&duration=60"} + * curl "http://localhost:10002/prof?output=tree&duration=60" * - To collect 30 second heap allocation profile of current process (returns * FlameGraph svg) * curl "http://localhost:10002/prof?event=alloc" diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java index 41fea63d205..94e9dceb6a7 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java @@ -125,7 +125,7 @@ private HddsServerUtil() { HddsServerUtil.class); /** - * Add protobuf-based protocol to the {@link RPC.Server}. + * Add protobuf-based protocol to the {@link org.apache.hadoop.ipc.RPC.Server}. 
* @param conf configuration * @param protocol Protocol interface * @param service service that implements the protocol diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java index 3e8ea30a652..8623a3bdd7d 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java @@ -184,7 +184,7 @@ void move(KEY sourceKey, KEY destKey, VALUE value, /** * Get List of Index to Table Names. * (For decoding table from column family index) - * @return Map of Index -> TableName + * @return Map of Index -> TableName */ Map getTableNames(); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java index c441ec929c7..c156b8e4d67 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java @@ -841,7 +841,7 @@ private int getLastLevel() throws IOException { /** * Deletes sst files which do not correspond to prefix * for given table. - * @param prefixPairs, a map of TableName to prefixUsed. + * @param prefixPairs a map of TableName to prefixUsed. */ public void deleteFilesNotMatchingPrefix(Map prefixPairs) throws IOException, RocksDBException { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java index 5cfdcdb8a03..c7055267052 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java @@ -170,7 +170,7 @@ default VALUE getReadCopy(KEY key) throws IOException { /** * Returns a prefixed iterator for this metadata store. * @param prefix - * @return + * @return MetaStoreIterator */ TableIterator> iterator(KEY prefix) throws IOException; @@ -246,7 +246,7 @@ default TableCacheMetrics createCacheMetrics() throws IOException { /** * Returns a certain range of key value pairs as a list based on a - * startKey or count. Further a {@link MetadataKeyFilters.MetadataKeyFilter} + * startKey or count. Further a {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter} * can be added to * filter keys if necessary. * To prevent race conditions while listing * entries, this implementation takes a snapshot and lists the entries from @@ -262,7 +262,7 @@ default TableCacheMetrics createCacheMetrics() throws IOException { * the value for count must be an integer greater than 0. *
* <p>
* This method allows to specify one or more - * {@link MetadataKeyFilters.MetadataKeyFilter} + * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter} * to filter keys by certain condition. Once given, only the entries * whose key passes all the filters will be included in the result. * @@ -270,7 +270,7 @@ default TableCacheMetrics createCacheMetrics() throws IOException { * @param count max number of entries to return. * @param prefix fixed key schema specific prefix * @param filters customized one or more - * {@link MetadataKeyFilters.MetadataKeyFilter}. + * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter}. * @return a list of entries found in the database or an empty list if the * startKey is invalid. * @throws IOException if there are I/O errors. @@ -293,7 +293,7 @@ List> getRangeKVs(KEY startKey, * @param count max number of entries to return. * @param prefix fixed key schema specific prefix * @param filters customized one or more - * {@link MetadataKeyFilters.MetadataKeyFilter}. + * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter}. * @return a list of entries found in the database. * @throws IOException * @throws IllegalArgumentException diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PlacementPolicy.java index a792e2cea6b..05eb32722e7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PlacementPolicy.java @@ -73,7 +73,7 @@ ContainerPlacementStatus validateContainerPlacement( * Given a set of replicas of a container which are * neither over underreplicated nor overreplicated, * return a set of replicas to copy to another node to fix misreplication. - * @param replicas: Map of replicas with value signifying if + * @param replicas Map of replicas with value signifying if * replica can be copied */ Set replicasToCopyToFixMisreplication( @@ -82,8 +82,8 @@ Set replicasToCopyToFixMisreplication( /** * Given a set of replicas of a container which are overreplicated, * return a set of replicas to delete to fix overreplication. - * @param replicas: Set of existing replicas of the container - * @param expectedCountPerUniqueReplica: Replication factor of each + * @param replicas Set of existing replicas of the container + * @param expectedCountPerUniqueReplica Replication factor of each * unique replica */ Set replicasToRemoveToFixOverreplication( diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java index fabd2810764..2a1c6fce0c0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java @@ -525,7 +525,7 @@ public boolean isValidNode(DatanodeDetails datanodeDetails, * Given a set of replicas of a container which are * neither over underreplicated nor overreplicated, * return a set of replicas to copy to another node to fix misreplication. 
- * @param replicas: Map of replicas with value signifying if + * @param replicas Map of replicas with value signifying if * replica can be copied */ @Override @@ -595,8 +595,8 @@ protected Node getPlacementGroup(DatanodeDetails dn) { * to be removed is also removed from the maps created above and * the count for rack is reduced. * The set of replicas computed are then returned by the function. - * @param replicas: Set of existing replicas of the container - * @param expectedCountPerUniqueReplica: Replication factor of each + * @param replicas Set of existing replicas of the container + * @param expectedCountPerUniqueReplica Replication factor of each * * unique replica * @return Set of replicas to be removed are computed. */ diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java index af900d1c146..6b6a888f424 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java @@ -61,9 +61,9 @@ default List getContainers() { * The max size of the searching range cannot exceed the * value of count. * - * @param startID start containerID, >=0, + * @param startID start containerID, >=0, * start searching at the head if 0. - * @param count count must be >= 0 + * @param count count must be >= 0 * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big. * @@ -85,9 +85,9 @@ default List getContainers() { * The max size of the searching range cannot exceed the * value of count. * - * @param startID start containerID, >=0, + * @param startID start containerID, >=0, * start searching at the head if 0. - * @param count count must be >= 0 + * @param count count must be >= 0 * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big. * @param state container state diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java index 0abe8f6ea34..fcfef7de6e6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthResult.java @@ -248,7 +248,6 @@ public void setOfflineIndexesOkAfterPending(boolean val) { /** * Returns true if a container has under-replication caused by offline * indexes, but it is corrected by a pending add. 
- * @return */ public boolean offlineIndexesOkAfterPending() { return offlineIndexesOkAfterPending; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaPendingOps.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaPendingOps.java index d1890bdf802..4eef0a8a744 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaPendingOps.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaPendingOps.java @@ -116,7 +116,7 @@ public List getPendingOps(ContainerID containerID) { * Store a ContainerReplicaOp to add a replica for the given ContainerID. * @param containerID ContainerID for which to add a replica * @param target The target datanode - * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) + * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) * @param deadlineEpochMillis The time by which the replica should have been * added and reported by the datanode, or it will * be discarded. @@ -130,7 +130,7 @@ public void scheduleAddReplica(ContainerID containerID, * Store a ContainerReplicaOp to delete a replica for the given ContainerID. * @param containerID ContainerID for which to delete a replica * @param target The target datanode - * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) + * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) * @param deadlineEpochMillis The time by which the replica should have been * deleted and reported by the datanode, or it will * be discarded. @@ -145,7 +145,7 @@ public void scheduleDeleteReplica(ContainerID containerID, * been replicated successfully. * @param containerID ContainerID for which to complete the replication * @param target The target Datanode - * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) + * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) * @return True if a pending replica was found and removed, false otherwise. */ public boolean completeAddReplica(ContainerID containerID, @@ -167,7 +167,7 @@ public boolean completeAddReplica(ContainerID containerID, * been deleted successfully. * @param containerID ContainerID for which to complete the deletion * @param target The target Datanode - * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) + * @param replicaIndex The replica index (zero for Ratis, > 0 for EC) * @return True if a pending replica was found and removed, false otherwise. */ public boolean completeDeleteReplica(ContainerID containerID, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java index fe771fac6a4..4e14798ccdc 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisContainerReplicaCount.java @@ -186,9 +186,9 @@ private void countReplicas() { * For example, consider a CLOSED container with the following replicas: * {CLOSED, CLOSING, OPEN, UNHEALTHY} * In this case, healthy replica count equals 3. Calculation: - * 1 CLOSED -> 1 matching replica. - * 1 OPEN, 1 CLOSING -> 2 mismatched replicas. 
- * 1 UNHEALTHY -> 1 unhealthy replica. Not counted as healthy. + * 1 CLOSED -> 1 matching replica. + * 1 OPEN, 1 CLOSING -> 2 mismatched replicas. + * 1 UNHEALTHY -> 1 unhealthy replica. Not counted as healthy. * Total healthy replicas = 3 = 1 matching + 2 mismatched replicas */ public int getHealthyReplicaCount() { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/HealthCheck.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/HealthCheck.java index a95c0d39945..f271b8a863c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/HealthCheck.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/HealthCheck.java @@ -49,7 +49,6 @@ public interface HealthCheck { * returns false. This allows handlers to be chained together, and each will * be tried in turn until one succeeds. * @param handler - * @return */ HealthCheck addNext(HealthCheck handler); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java index c6f15be5d2c..1289a0a21ff 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java @@ -37,10 +37,10 @@ *
* <p>
* Currently we manage the following attributes for a container.
* <p>
- * 1. StateMap - LifeCycleState -> Set of ContainerIDs - * 2. TypeMap - ReplicationType -> Set of ContainerIDs - * 3. OwnerMap - OwnerNames -> Set of ContainerIDs - * 4. FactorMap - ReplicationFactor -> Set of ContainerIDs + * 1. StateMap - LifeCycleState -> Set of ContainerIDs + * 2. TypeMap - ReplicationType -> Set of ContainerIDs + * 3. OwnerMap - OwnerNames -> Set of ContainerIDs + * 4. FactorMap - ReplicationFactor -> Set of ContainerIDs *
* <p>
* This means that for a cluster size of 750 PB -- we will have around 150 * Million containers, if we assume 5GB average container size. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java index f0d78b23079..5eeb489f677 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java @@ -58,7 +58,7 @@ /** * Utilities for SCM HA security. */ -public final class HASecurityUtils { +public final class HASecurityUtils { private HASecurityUtils() { } @@ -150,7 +150,6 @@ public static CertificateServer initializeRootCertificateServer( * * @param conf * @param certificateClient - * @return */ public static GrpcTlsConfig createSCMRatisTLSConfig(SecurityConfig conf, CertificateClient certificateClient) throws IOException { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java index 03f6ae293b2..92a5140ff2a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java @@ -55,7 +55,6 @@ public interface SCMHAManager extends AutoCloseable { /** * Returns the DBTransactionBuffer as SCMHADBTransactionBuffer if its * valid. - * @return */ SCMHADBTransactionBuffer asSCMHADBTransactionBuffer(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java index 05ed833edbe..b3350d8a12a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java @@ -346,7 +346,7 @@ public void setCommandCounts(CommandQueueReportProto cmds, * Retrieve the number of queued commands of the given type, as reported by * the datanode at the last heartbeat. * @param cmd The command for which to receive the queued command count - * @return -1 if we have no information about the count, or an integer >= 0 + * @return -1 if we have no information about the count, or an integer >= 0 * indicating the command count at the last heartbeat. */ public int getCommandCount(SCMCommandProto.Type cmd) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index 7121d8f7a9d..7db0c88e173 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -1166,7 +1166,6 @@ public Map> getNodeStatusInfo() { * Calculate the storage capacity of the DataNode node. * @param storageReports Calculate the storage capacity corresponding * to the storage collection. - * @return */ public static String calculateStorageCapacity( List storageReports) { @@ -1214,7 +1213,6 @@ private static String convertUnit(double value) { * Calculate the storage usage percentage of a DataNode node. * @param storageReports Calculate the storage percentage corresponding * to the storage collection. 
-   * @return
    */
   public static String[] calculateStoragePercentage(
       List<StorageReportProto> storageReports) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index 0cc6ab7ab9d..105e7ac3486 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -483,7 +483,6 @@ private static String flatten(String input) {
   /**
    * Get Key associated with Datanode address for this server.
-   * @return
    */
   protected String getDatanodeAddressKey() {
     return this.scm.getScmNodeDetails().getDatanodeAddressKey();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java
index 88b3c887746..17318107e3d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java
@@ -430,7 +430,6 @@ public String getCACertificate() throws IOException {
    * @param role - node role: OM/SCM/DN.
    * @param startSerialId - start certificate serial id.
    * @param count - max number of certificates returned in a batch.
-   * @return
    * @throws IOException
    */
   @Override
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 5f69d9fee2b..ba52b7fcc5f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -609,7 +609,8 @@ public OzoneConfiguration getConfiguration() {
    * @param conf HDDS configuration
    * @param configurator SCM configurator
    * @return SCM instance
-   * @throws IOException, AuthenticationException
+   * @throws IOException on Failure,
+   * @throws AuthenticationException
    */
   public static StorageContainerManager createSCM(
       OzoneConfiguration conf, SCMConfigurator configurator)
@@ -622,7 +623,8 @@ public static StorageContainerManager createSCM(
    *
    * @param conf HDDS configuration
    * @return SCM instance
-   * @throws IOException, AuthenticationException
+   * @throws IOException on Failure,
+   * @throws AuthenticationException
    */
   public static StorageContainerManager createSCM(OzoneConfiguration conf)
       throws IOException, AuthenticationException {
diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java
index c9fa668445d..9a3a5c7a8f1 100644
--- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java
+++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java
@@ -335,11 +335,11 @@ private static long monotonicNow() {
    * </pre>
    *
    * TODO: Add lambda support once Java 8 is common.
-   * <pre>
+   * {@code
    *   SystemErrCapturer.withCapture(capture -> {
    *     ...
    *   })
-   * </pre>
+   * }
    */
   public static class SystemErrCapturer implements AutoCloseable {
     private final ByteArrayOutputStream bytes;
@@ -376,11 +376,11 @@ public void close() throws Exception {
    * </pre>
    *
    * TODO: Add lambda support once Java 8 is common.
-   * <pre>
+   * {@code
    *   SystemOutCapturer.withCapture(capture -> {
    *     ...
    *   })
-   * </pre>
+   * }
    */
   public static class SystemOutCapturer implements AutoCloseable {
     private final ByteArrayOutputStream bytes;
@@ -475,8 +475,8 @@ public static final class ReflectionUtils {
      * This method provides the modifiers field using reflection approach which is compatible
      * for both pre Java 9 and post java 9 versions.
      * @return modifiers field
-     * @throws IllegalAccessException
-     * @throws NoSuchFieldException
+     * @throws IllegalAccessException illegalAccessException,
+     * @throws NoSuchFieldException noSuchFieldException.
      */
     public static Field getModifiersField() throws IllegalAccessException, NoSuchFieldException {
       Field modifiersField = null;
diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java
index 661989dade1..d6b028c815f 100644
--- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java
+++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java
@@ -77,11 +77,13 @@ public interface TimeoutHandler {
    * is called. This returns the exception passed in (if any),
    * or generates a new one.
    * <pre>
+   * {@code
    * await(
    *   30 * 1000,
    *   () -> { return 0 == filesystem.listFiles(new Path("/")).length); },
    *   () -> 500),
    *   (timeout, ex) -> ex != null ? ex : new TimeoutException("timeout"));
+   * }
    * </pre>
    *
    * @param timeoutMillis timeout in milliseconds.
@@ -160,9 +162,11 @@ public static int await(int timeoutMillis,
    *
    * Example: await for probe to succeed:
    * <pre>
+   * {@code
    * await(
    *   30 * 1000, 500,
    *   () -> { return 0 == filesystem.listFiles(new Path("/")).length); });
+   * }
    * </pre>
    *
    * @param timeoutMillis timeout in milliseconds.
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
index 44239aafceb..80a495a1d12 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
@@ -170,7 +170,7 @@ private static OzoneClient getRpcClient(ClientProtocol clientProtocol,
    * Create OzoneClient for token renew/cancel operations.
    * @param conf Configuration to be used for OzoneCient creation
    * @param token ozone token is involved
-   * @return
+   * @return OzoneClient
    * @throws IOException
    */
   public static OzoneClient getOzoneClient(Configuration conf,
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
index 2235e11de2b..d6320061253 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -119,7 +119,7 @@ public static InetSocketAddress getOmAddress(ConfigurationSource conf) {
    * Return list of OM addresses by service ids - when HA is enabled.
    *
    * @param conf {@link ConfigurationSource}
-   * @return {service.id -> [{@link InetSocketAddress}]}
+   * @return {service.id -&gt; [{@link InetSocketAddress}]}
    */
   public static Map<String, List<InetSocketAddress>> getOmHAAddressesById(
       ConfigurationSource conf) {
@@ -707,7 +707,7 @@ public static void verifyKeyNameWithSnapshotReservedWordForDeletion(String keyNa
    * Look at 'ozone.om.internal.service.id' first. If configured, return that.
    * If the above is not configured, look at 'ozone.om.service.ids'.
    * If count(ozone.om.service.ids) == 1, return that id.
-   * If count(ozone.om.service.ids) > 1 throw exception
+   * If count(ozone.om.service.ids) &gt; 1 throw exception
    * If 'ozone.om.service.ids' is not configured, return null. (Non HA)
    * @param conf configuration
    * @return OM service ID.
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java
index 8ffa3c45c09..c7e20fb7e8b 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java
@@ -27,7 +27,7 @@
 /**
  * An {@link OutputStream} first write data to a buffer up to the capacity.
- * Then, select {@link Underlying} by the number of bytes written.
+ * Then, select {@code Underlying} by the number of bytes written.
  * When {@link #flush()}, {@link #hflush()}, {@link #hsync()}
  * or {@link #close()} is invoked,
  * it will force flushing the buffer and {@link OutputStream} selection.
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java
index ae238f1b45a..db00917dacc 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java
@@ -80,7 +80,7 @@ <T> T doUnderLock(String lockId, S3SecretFunction<T> action)
   /**
    * Default implementation of secret check method.
    * @param kerberosId kerberos principal.
-   * @return true if exist associated s3 secret for given {@param kerberosId},
+   * @return true if exist associated s3 secret for given {@code kerberosId},
    * false if not.
    */
   default boolean hasS3Secret(String kerberosId) throws IOException {
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index a0394d6626d..0507a27de61 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -627,7 +627,7 @@ public OmKeyInfo build() {
   /**
    * For network transmit.
-   * @return
+   * @return KeyInfo
    */
   public KeyInfo getProtobuf(int clientVersion) {
     return getProtobuf(false, clientVersion);
@@ -659,7 +659,7 @@ public KeyInfo getNetworkProtobuf(String fullKeyName, int clientVersion,
   /**
    *
    * @param ignorePipeline true for persist to DB, false for network transmit.
-   * @return
+   * @return KeyInfo
    */
   public KeyInfo getProtobuf(boolean ignorePipeline, int clientVersion) {
     return getProtobuf(ignorePipeline, null, clientVersion, false);
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneIdentityProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneIdentityProvider.java
index 6bab1025b13..ed3d3ee25c2 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneIdentityProvider.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneIdentityProvider.java
@@ -21,7 +21,6 @@
 import org.apache.hadoop.ipc.CallerContext;
 import org.apache.hadoop.ipc.IdentityProvider;
 import org.apache.hadoop.ipc.Schedulable;
-import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -43,7 +42,7 @@ public OzoneIdentityProvider() {
   }
 
   /**
-   * If schedulable isn't instance of {@link Server.Call},
+   * If schedulable isn't instance of {@link org.apache.hadoop.ipc.Server.Call},
    * then trying to access getCallerContext() method, will
    * result in an exception.
    *
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java
index ccc47e7fc68..f1dd1e9eeba 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java
@@ -110,9 +110,7 @@ public static RepeatedOmKeyInfo getFromProto(RepeatedKeyInfo
   }
 
   /**
-   *
-   * @param compact, true for persistence, false for network transmit
-   * @return
+   * @param compact true for persistence, false for network transmit
    */
   public RepeatedKeyInfo getProto(boolean compact, int clientVersion) {
     List<KeyInfo> list = new ArrayList<>();
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccountNameSpace.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccountNameSpace.java
index 753d528cb05..a715bfbc153 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccountNameSpace.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccountNameSpace.java
@@ -57,7 +57,7 @@ public interface AccountNameSpace {
    * Get Space Usage Information for this AccountNameSpace. This can be
    * used for billing purpose. Such Aggregation can also be done lazily
    * by a Recon job. Implementations can decide.
-   * @return
+   * @return SpaceUsage
    */
   SpaceUsageSource getSpaceUsage();
 
@@ -71,7 +71,7 @@ public interface AccountNameSpace {
   /**
    * Get Quota Information for this AccountNameSpace.
-   * @return
+   * @return OzoneQuota
    */
   OzoneQuota getQuota();
 }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/BucketNameSpace.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/BucketNameSpace.java
index 1481f1b466b..d5ecf7bba80 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/BucketNameSpace.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/BucketNameSpace.java
@@ -74,7 +74,7 @@ public interface BucketNameSpace {
    * Get Space Usage Information for this BucketNameSpace. This can be
    * used for billing purpose. Such Aggregation can also be done lazily
    * by a Recon job. Implementations can decide.
-   * @return
+   * @return SpaceUsageSource
    */
   SpaceUsageSource getSpaceUsage();
 
@@ -88,7 +88,7 @@ public interface BucketNameSpace {
   /**
    * Get Quota Information for this BucketNameSpace.
-   * @return
+   * @return OzoneQuota
   */
   OzoneQuota getQuota();
 }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
index 79ddbbf8dad..94822630f8e 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
@@ -1061,7 +1061,7 @@ DBUpdates getDBUpdates(
    * @param txnApplyCheckIntervalSeconds Time in SECONDS to wait between
    *                                     successive checks for all transactions
    *                                     to be applied to the OM DB.
-   * @return
+   * @return {@code long}
    */
   default long prepareOzoneManager(
       long txnApplyWaitTimeoutSeconds, long txnApplyCheckIntervalSeconds)
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
index 68429c36d08..e4174efcfcc 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
@@ -43,7 +43,7 @@
  * BucketManager uses MetadataDB to store bucket level information.
  * Keys used in BucketManager for storing data into MetadataDB
  * for BucketInfo:
- * {volume/bucket} -> bucketInfo
+ * {volume/bucket} -&gt; bucketInfo
  */
 public class BucketManagerImpl implements BucketManager {
   private static final Logger LOG =
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java
index a83304ade45..bb682508524 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java
@@ -63,7 +63,7 @@
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_GRPC_WORKERGROUP_SIZE_KEY;
 
 /**
- * Separated network server for gRPC transport OzoneManagerService s3g->OM.
+ * Separated network server for gRPC transport OzoneManagerService s3g-&gt;OM.
 */
 public class GrpcOzoneManagerServer {
   private static final Logger LOG =
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index de201fd5d4b..4873a7db491 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -139,7 +139,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
    * <p>
    * OM DB Schema:
    *
-   *
+   * <pre>
+   * {@code
    * Common Tables:
    * |----------------------------------------------------------------------|
    * |  Column Family     |        VALUE                                    |
@@ -160,7 +161,10 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
    * |----------------------------------------------------------------------|
    * | transactionInfoTable| #TRANSACTIONINFO -> OMTransactionInfo          |
    * |----------------------------------------------------------------------|
-   *
+   * }
+   * </pre>
+   * <pre>
+   * {@code
    * Multi-Tenant Tables:
    * |----------------------------------------------------------------------|
    * | tenantStateTable          | tenantId -> OmDBTenantState              |
@@ -169,8 +173,10 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
    * |----------------------------------------------------------------------|
    * | principalToAccessIdsTable | userPrincipal -> OmDBUserPrincipalInfo   |
    * |----------------------------------------------------------------------|
-   *
-   *
+   * }
+   * </pre>
+   * <pre>
+   * {@code
    * Simple Tables:
    * |----------------------------------------------------------------------|
    * |  Column Family     |        VALUE                                    |
@@ -181,7 +187,10 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
    * |----------------------------------------------------------------------|
    * | openKey            | /volumeName/bucketName/keyName/id->KeyInfo      |
    * |----------------------------------------------------------------------|
-   *
+   * }
+   * </pre>
+   * <pre>
+   * {@code
    * Prefix Tables:
    * |----------------------------------------------------------------------|
    * |  Column Family   |        VALUE                                      |
@@ -195,7 +204,10 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
    * |  deletedDirTable | /volumeId/bucketId/parentId/dirName/objectId ->   |
    * |                  |                                      KeyInfo      |
    * |----------------------------------------------------------------------|
-   *
+   * }
+   * </pre>
+   * <pre>
+   * {@code
    * Snapshot Tables:
    * |-------------------------------------------------------------------------|
    * |  Column Family        |        VALUE                                    |
@@ -209,6 +221,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
    * |-------------------------------------------------------------------------|
    * | compactionLogTable    | dbTrxId-compactionTime -> compactionLogEntry    |
    * |-------------------------------------------------------------------------|
+   * }
+   * </pre>
   */
  public static final String USER_TABLE = "userTable";
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java
index 2301bbbdbf2..c693e529580 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java
@@ -51,6 +51,8 @@ private OzoneManagerUtils() {
    * OzoneManagerStateMachine#runCommand function and ensures sequential
    * execution path.
    * Below is the call trace to perform OM client request operation:
+   * <pre>
+   * {@code
    * OzoneManagerStateMachine#applyTransaction ->
    * OzoneManagerStateMachine#runCommand ->
    * OzoneManagerRequestHandler#handleWriteRequest ->
@@ -60,6 +62,8 @@ private OzoneManagerUtils() {
    * OzoneManagerUtils#getBucketLayout ->
    * OzoneManagerUtils#getOmBucketInfo ->
    * omMetadataManager().getBucketTable().get(buckKey)
+   * }
+   * </pre>
    */
   public static OmBucketInfo getBucketInfo(OMMetadataManager metaMgr,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 5e324b376fb..5a1612e021a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -407,9 +407,9 @@ private static OMClientRequest getOMAclRequest(OMRequest omRequest,
   }
 
   /**
-   * Convert exception result to {@link OzoneManagerProtocolProtos.Status}.
+   * Convert exception result to {@link org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status}.
    * @param exception
-   * @return OzoneManagerProtocolProtos.Status
+   * @return Status
    */
   public static Status exceptionToResponseStatus(Exception exception) {
     if (exception instanceof OMException) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java
index 9ae6b7e5d50..f73255da117 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java
@@ -45,7 +45,6 @@ public interface RequestAuditor {
    * @param auditMap
    * @param throwable
    * @param userInfo
-   * @return
    */
   OMAuditLogger.Builder buildAuditMessage(
       AuditAction op, Map<String, String> auditMap, Throwable throwable, UserInfo userInfo);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
index 53d4c83c3a9..802cfa54e60 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
@@ -257,7 +257,7 @@ dirKeyInfo, missingParentInfos, result, getBucketLayout(),
    * @param bucketInfo
    * @param omPathInfo
    * @param trxnLogIndex
-   * @return
+   * @return {@code List<OmDirectoryInfo>}
    * @throws IOException
    */
   public static List<OmDirectoryInfo> getAllParentInfo(OzoneManager ozoneManager,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index 3e7549b176e..8f2a768c525 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -1050,7 +1050,7 @@ public static long getParentID(long volumeId, long bucketId, String keyName,
    * @param volumeName - volume name.
    * @param bucketName - bucket name.
    * @param keyName - key name.
-   * @return
+   * @return {@code long}
    * @throws IOException
    */
   public static long getParentId(OMMetadataManager omMetadataManager,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java
index b370c286e0f..f40adb7495f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java
@@ -246,7 +246,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
    * @param keyName - key name.
    * @param uploadID - Multi part upload ID for this key.
    * @param omMetadataManager
-   * @return
+   * @return {@code String}
    * @throws IOException
    */
   @Override
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java
index 72365221d3b..e57b6d99fd4 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java
@@ -395,7 +395,7 @@ private Map<String, String> buildAuditMap(
    * level, e.g. source is /vol1/buck1/dir1/key1 and dest is /vol1/buck1).
    *
    * @param request
-   * @return
+   * @return {@code String}
    * @throws OMException
    */
   @Override
@@ -410,7 +410,7 @@ protected String extractDstKey(RenameKeyRequest request) throws OMException {
    * Returns the validated and normalized source key name.
    *
    * @param keyArgs
-   * @return
+   * @return {@code String}
    * @throws OMException
    */
   @Override
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
index 729ec57283f..88c5ad91405 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
@@ -611,7 +611,7 @@ protected void getFileEncryptionInfoForMpuKey(KeyArgs keyArgs,
   /**
    * Get FileEncryptionInfoProto from KeyArgs.
    * @param keyArgs
-   * @return
+   * @return FileEncryptionInfo
    */
   protected FileEncryptionInfo getFileEncryptionInfo(KeyArgs keyArgs) {
     FileEncryptionInfo encryptionInfo = null;
@@ -911,7 +911,7 @@ private OmKeyInfo prepareMultipartFileInfo(
    * @param keyName - key name.
    * @param uploadID - Multi part upload ID for this key.
    * @param omMetadataManager
-   * @return
+   * @return {@code String}
    * @throws IOException
    */
   protected String getDBMultipartOpenKey(String volumeName, String bucketName,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java
index 05b0e5b0cdc..b400fb6ed76 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java
@@ -77,7 +77,7 @@ public static Object getINode(Path file) throws IOException {
    * sst compaction backup directory)
    *
    * @param truncateLength - Length of initial path to trim in file path.
-   * @param hardLinkFiles - Map of link->file paths.
+   * @param hardLinkFiles - Map of link-&gt;file paths.
    * @return Path to the file of links created.
    */
   public static Path createHardLinkList(int truncateLength,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java
index 432fced0bbb..201a9fe0c9c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java
@@ -284,7 +284,7 @@ public static String getOzonePathKeyForFso(OMMetadataManager metadataManager,
    * Returns merged repeatedKeyInfo entry with the existing deleted entry in the table.
    * @param snapshotMoveKeyInfos keyInfos to be added.
    * @param metadataManager metadataManager for a store.
-   * @return
+   * @return RepeatedOmKeyInfo
    * @throws IOException
    */
   public static RepeatedOmKeyInfo createMergedRepeatedOmKeyInfoFromDeletedTableEntry(
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java
index e60362a1ebb..76546f2e480 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java
@@ -68,7 +68,7 @@ default OMClientResponse handleWriteRequest(OMRequest omRequest, TermIndex termI
   }
 
   /**
-   * Implementation of {@link #handleWriteRequest(OMRequest, TermIndex, OzoneManagerDoubleBuffer)}.
+   * Implementation of {@link #handleWriteRequest}.
    *
    * @param omRequest the write request
    * @param termIndex - ratis transaction term and index
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
index da278f17fbf..31889ed2a58 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
@@ -714,7 +714,7 @@ private FileStatusAdapter getFileStatusForKeyOrSnapshot(OFSPath ofsPath, URI uri
    *
    * @param allUsers return trashRoots of all users if true, used by emptier
    * @param fs Pointer to the current OFS FileSystem
-   * @return
+   * @return {@code Collection<FileStatus>}
    */
   public Collection<FileStatus> getTrashRoots(boolean allUsers,
       BasicRootedOzoneFileSystem fs) {
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java
index f92f8d95704..6354ee0eebe 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java
@@ -33,7 +33,7 @@
  * information can be converted to this class, and this class can be used to
  * create hadoop 2.x FileStatus.
  * <p>
- * FileStatus (Hadoop 3.x) --> FileStatusAdapter --> FileStatus (Hadoop 2.x)
+ * FileStatus (Hadoop 3.x) --&gt; FileStatusAdapter --&gt; FileStatus (Hadoop 2.x)
  */
 public final class FileStatusAdapter {
 
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/AccessHeatMapEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/AccessHeatMapEndpoint.java
index b0a9681c5b8..472cdb62a66 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/AccessHeatMapEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/AccessHeatMapEndpoint.java
@@ -65,13 +65,14 @@ public AccessHeatMapEndpoint(HeatMapServiceImpl heatMapService) {
    * with volume, buckets under that volume,
    * then directories, subdirectories and paths
    * under that bucket.
-   * E.g. -------->>
+   * <pre>
+   * E.g. --------&gt;&gt;
    * vol1                           vol2
    * - bucket1                      - bucket2
    * - dir1/dir2/key1               - dir4/dir1/key1
    * - dir1/dir2/key2               - dir4/dir5/key2
    * - dir1/dir3/key1               - dir5/dir3/key1
-   *
+   * </pre>
    * @return {@link Response}
    */
   @GET
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
index c4d2d35bef9..33fc4fd96de 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
@@ -157,15 +157,15 @@ public ContainerEndpoint(OzoneStorageContainerManager reconSCM,
   }
 
   /**
-   * Return {@linkplain org.apache.hadoop.hdds.scm.container}
+   * Return {@code org.apache.hadoop.hdds.scm.container}
    * for the containers starting from the given "prev-key" query param for the
    * given "limit". The given "prev-key" is skipped from the results returned.
    *
    * @param prevKey the containerID after which results are returned.
-   *                start containerID, >=0,
+   *                start containerID, &gt;=0,
    *                start searching at the head if 0.
    * @param limit max no. of containers to get.
-   *              count must be >= 0
+   *              count must be &gt;= 0
    *              Usually the count will be replace with a very big
    *              value instead of being unlimited in case the db is very big.
    * @return {@link Response}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java
index 9cd6fa33d03..58d2cd31076 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java
@@ -245,13 +245,15 @@ public Map searchOpenKeysInFSO(String startPrefix,
    * This method transforms a user-provided path (e.g., "volume/bucket/dir1") into
    * a database-friendly format ("/volumeID/bucketID/ParentId/") by replacing names
    * with their corresponding IDs. It simplifies database queries for FSO bucket operations.
-   *
+   * <pre>
+   * {@code
    * Examples:
    * - Input: "volume/bucket/key" -> Output: "/volumeID/bucketID/parentDirID/key"
    * - Input: "volume/bucket/dir1" -> Output: "/volumeID/bucketID/dir1ID/"
    * - Input: "volume/bucket/dir1/key1" -> Output: "/volumeID/bucketID/dir1ID/key1"
    * - Input: "volume/bucket/dir1/dir2" -> Output: "/volumeID/bucketID/dir2ID/"
-   *
+   * }
+   * </pre>
    * @param prevKeyPrefix The path to be converted.
    * @return The object path as "/volumeID/bucketID/ParentId/" or an empty string if an error occurs.
    * @throws IOException If database access fails.
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java
index 266caaa2d8e..a2db616ec2f 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java
@@ -118,7 +118,7 @@ public static String buildSubpath(String path, String nextLevel) {
   }
 
   /**
-   * Example: /vol1/buck1/a/b/c/d/e/file1.txt -> a/b/c/d/e/file1.txt.
+   * Example: {@literal /vol1/buck1/a/b/c/d/e/file1.txt -> a/b/c/d/e/file1.txt} .
    * @param names parsed request
    * @return key name
    */
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java
index 1fc114eabd7..14ae997073c 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java
@@ -109,7 +109,7 @@ List<OmBucketInfo> listBucketsUnderVolume(
   /**
    * Return the OzoneConfiguration instance used by Recon.
-   * @return
+   * @return OzoneConfiguration
    */
   OzoneConfiguration getOzoneConfiguration();
 
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java
index bfb2b05aad3..44595a43b79 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java
@@ -70,7 +70,7 @@ void batchStoreContainerKeyMapping(BatchOperation batch,
       Integer count) throws IOException;
 
   /**
-   * Store the containerID -> no. of keys count into the container DB store.
+   * Store the containerID -&gt; no. of keys count into the container DB store.
    *
    * @param containerID the containerID.
    * @param count count of the keys within the given containerID.
@@ -80,7 +80,7 @@ void batchStoreContainerKeyMapping(BatchOperation batch,
   void storeContainerKeyCount(Long containerID, Long count) throws IOException;
 
   /**
-   * Store the containerID -> no. of keys count into a batch.
+   * Store the containerID -&gt; no. of keys count into a batch.
    *
    * @param batch the batch operation we store into
    * @param containerID the containerID.
@@ -91,7 +91,7 @@ void batchStoreContainerKeyCounts(BatchOperation batch,
       Long containerID, Long count) throws IOException;
 
   /**
-   * Store the containerID -> ContainerReplicaWithTimestamp mapping to the
+   * Store the containerID -&gt; ContainerReplicaWithTimestamp mapping to the
    * container DB store.
    *
    * @param containerID the containerID.
@@ -159,7 +159,7 @@ Map getContainerReplicaHistory(
    * Get the stored key prefixes for the given containerId.
    *
    * @param containerId the given containerId.
-   * @return Map of Key prefix -> count.
+   * @return Map of Key prefix -&gt; count.
    */
   Map getKeyPrefixesForContainer(
       long containerId) throws IOException;
@@ -170,7 +170,7 @@ Map getKeyPrefixesForContainer(
    *
    * @param containerId the given containerId.
    * @param prevKeyPrefix the key prefix to seek to and start scanning.
-   * @return Map of Key prefix -> count.
+   * @return Map of Key prefix -&gt; count.
    */
   Map getKeyPrefixesForContainer(
       long containerId, String prevKeyPrefix) throws IOException;
@@ -182,7 +182,7 @@ Map getKeyPrefixesForContainer(
    *
    * @param limit the no. of containers to fetch.
    * @param prevContainer containerID after which the results are returned.
-   * @return Map of containerID -> containerMetadata.
+   * @return Map of containerID -&gt; containerMetadata.
    * @throws IOException
    */
   Map getContainers(int limit, long prevContainer)
@@ -256,7 +256,7 @@ void commitBatchOperation(RDBBatchOperation rdbBatchOperation)
    *
    * @param prevKeyPrefix the key prefix to seek to and start scanning.
    * @param keyVersion the key version to seek
-   * @return Map of Key prefix -> count.
+   * @return Map of Key prefix -&gt; count.
    */
   Map getContainerForKeyPrefixes(
       String prevKeyPrefix, long keyVersion) throws IOException;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java
index 88dffbacd20..42908a775a4 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java
@@ -191,7 +191,7 @@ public void batchStoreContainerKeyMapping(BatchOperation batch,
   }
 
   /**
-   * Store the containerID -> no. of keys count into the container DB store.
+   * Store the containerID -&gt; no. of keys count into the container DB store.
    *
    * @param containerID the containerID.
    * @param count count of the keys within the given containerID.
@@ -204,7 +204,7 @@ public void storeContainerKeyCount(Long containerID, Long count)
   }
 
   /**
-   * Store the containerID -> no. of keys count into a batch.
+   * Store the containerID -&gt; no. of keys count into a batch.
    *
    * @param batch the batch we store into
    * @param containerID the containerID.
@@ -219,7 +219,7 @@ public void batchStoreContainerKeyCounts(BatchOperation batch,
   }
 
   /**
-   * Store the ContainerID -> ContainerReplicaHistory (container first and last
+   * Store the ContainerID -&gt; ContainerReplicaHistory (container first and last
    * seen time) mapping to the container DB store.
    *
    * @param containerID the containerID.
@@ -417,7 +417,7 @@ public Map getKeyPrefixesForContainer(
   }
 
   /**
-   * Iterate the DB to construct a Map of containerID -> containerMetadata
+   * Iterate the DB to construct a Map of containerID -&gt; containerMetadata
    * only for the given limit from the given start key. The start containerID
    * is skipped from the result.
    *
    * @param limit No of containers to get.
    * @param prevContainer containerID after which the
    *                      list of containers are scanned.
-   * @return Map of containerID -> containerMetadata.
+   * @return Map of containerID -&gt; containerMetadata.
    * @throws IOException on failure.
    */
   @Override
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java
index fd5d8864080..bf34c9f8930 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java
@@ -57,7 +57,7 @@
 /**
  * Class to iterate over the OM DB and populate the Recon container DB with
- * the container -> Key reverse mapping.
+ * the container -&gt; Key reverse mapping.
  */
 public class ContainerKeyMapperTask implements ReconOmTask {
 
@@ -81,8 +81,8 @@ public ContainerKeyMapperTask(ReconContainerMetadataManager
   }
 
   /**
-   * Read Key -> ContainerId data from OM snapshot DB and write reverse map
-   * (container, key) -> count to Recon Container DB.
+   * Read Key -&gt; ContainerId data from OM snapshot DB and write reverse map
+   * (container, key) -&gt; count to Recon Container DB.
    */
   @Override
   public Pair<String, Boolean> reprocess(OMMetadataManager omMetadataManager) {
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconOmTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconOmTask.java
index e904334bb31..2092d6a326c 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconOmTask.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconOmTask.java
@@ -35,14 +35,14 @@ public interface ReconOmTask {
   /**
    * Process a set of OM events on tables that the task is listening on.
    * @param events Set of events to be processed by the task.
-   * @return Pair of task name -> task success.
+   * @return Pair of task name -&gt; task success.
    */
   Pair<String, Boolean> process(OMUpdateEventBatch events);
 
   /**
    * Process a on tables that the task is listening on.
    * @param omMetadataManager OM Metadata manager instance.
-   * @return Pair of task name -> task success.
+   * @return Pair of task name -&gt; task success.
    */
   Pair<String, Boolean> reprocess(OMMetadataManager omMetadataManager);
 
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java
index d66a7279cce..1a514ceb90b 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java
@@ -53,7 +53,7 @@ void reInitializeTasks(ReconOMMetadataManager omMetadataManager)
   /**
    * Get set of registered tasks.
-   * @return Map of Task name -> Task.
+   * @return Map of Task name -&gt; Task.
    */
   Map<String, ReconOmTask> getRegisteredTasks();
 
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3BucketGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3BucketGenerator.java
index ef7a85a4121..0233c14470a 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3BucketGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3BucketGenerator.java
@@ -28,9 +28,9 @@
  * Generate buckets via the s3 interface.
  *
  * For a secure cluster,
- * $> init user keytab
- * $> kinit -kt /etc/security/keytabs/testuser.keytab testuser/scm@EXAMPLE.COM
- * $> eval $(ozone s3 getsecret -e)
+ * $&gt; init user keytab
+ * $&gt; kinit -kt /etc/security/keytabs/testuser.keytab testuser/scm@EXAMPLE.COM
+ * $&gt; eval $(ozone s3 getsecret -e)
  * for getting and exporting access_key_id and secret_access_key
  * to freon shell test environment
  * secret access key.
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java
index 76ac3144d45..b0ac5b0033e 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java
@@ -67,8 +67,8 @@ public static void main(String[] args) throws IOException {
   /**
    * Generates Container Id to Blocks and BlockDetails mapping.
    * @param configuration @{@link OzoneConfiguration}
-   * @return {@code Map>>}
-   *     Map of ContainerId -> (Block, Block info)
+   * @return {@code Map>>
+   *     Map of ContainerId -> (Block, Block info)}
    * @throws IOException
    */
   public Map>>
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/StoreTypeOption.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/StoreTypeOption.java
index 8fbab644c0e..c06d29a7f93 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/StoreTypeOption.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/StoreTypeOption.java
@@ -21,7 +21,7 @@
 import picocli.CommandLine;
 
 /**
- * Option for {@link OzoneObj.StoreType}.
+ * Option for {@link org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType}.
 */
 public class StoreTypeOption
     implements CommandLine.ITypeConverter {
diff --git a/pom.xml b/pom.xml
index ffc747d4a36..2180d0af257 100644
--- a/pom.xml
+++ b/pom.xml
@@ -265,7 +265,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     1.6.1
     1.7.0
     3.5.0
-    3.7.0
+    3.10.0
     3.7.1
     0.16.1
     3.1.3
@@ -1421,7 +1421,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
         maven-javadoc-plugin
         ${maven-javadoc-plugin.version}
-          -Xdoclint:none
+          none
@@ -1883,8 +1883,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
-
-
       dist