From 156d242917dcba0b905ceba6f380c97e77c334bb Mon Sep 17 00:00:00 2001
From: deveshsingh
Date: Tue, 9 Apr 2024 22:02:53 +0530
Subject: [PATCH 01/11] HDDS-10634. Recon - listKeys API for listing of OBS,
 FSO and Legacy bucket keys with filters.

---
 .../hadoop/ozone/recon/ReconConstants.java    |   2 +
 .../apache/hadoop/ozone/recon/ReconUtils.java |  29 +++
 .../ozone/recon/api/NSSummaryEndpoint.java    | 103 ++++++++++-
 .../api/handlers/BucketEntityHandler.java     |   9 +-
 .../recon/api/handlers/BucketHandler.java     |  14 +-
 .../api/handlers/DirectoryEntityHandler.java  |   9 +-
 .../recon/api/handlers/EntityHandler.java     |   6 +-
 .../recon/api/handlers/FSOBucketHandler.java  |  88 +++++++--
 .../recon/api/handlers/KeyEntityHandler.java  |   2 +-
 .../api/handlers/LegacyBucketHandler.java     |  10 +-
 .../recon/api/handlers/OBSBucketHandler.java  |  10 +-
 .../recon/api/handlers/RootEntityHandler.java |   8 +-
 .../api/handlers/UnknownEntityHandler.java    |   2 +-
 .../api/handlers/VolumeEntityHandler.java     |   6 +-
 .../ozone/recon/api/types/DUResponse.java     |  36 ++++
 .../ozone/recon/api/types/NSSummary.java      |  14 +-
 .../ozone/recon/codec/NSSummaryCodec.java     |  11 +-
 .../ozone/recon/heatmap/HeatMapUtil.java      |   2 +-
 .../tasks/NSSummaryTaskDbEventHandler.java    |   4 +
 .../recon/OMMetadataManagerTestUtils.java     |  72 ++++++--
 .../api/TestNSSummaryEndpointWithFSO.java     | 169 ++++++++++++++++++
 ...TestNSSummaryEndpointWithOBSAndLegacy.java | 158 +++++++++++++++-
 .../TestReconNamespaceSummaryManagerImpl.java |   6 +-
 23 files changed, 705 insertions(+), 65 deletions(-)

diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java
index 134092146e5..bea4718cc11 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java
@@ -39,6 +39,8 @@ private ReconConstants() {
   public static final String DEFAULT_OPEN_KEY_INCLUDE_NON_FSO = "false";
   public static final String DEFAULT_OPEN_KEY_INCLUDE_FSO = "false";
   public static final String DEFAULT_FETCH_COUNT = "1000";
+  public static final String DEFAULT_KEY_SIZE = "0";
+  public static final String DEFAULT_START_VALUE = "1";
   public static final String DEFAULT_BATCH_NUMBER = "1";
   public static final String RECON_QUERY_BATCH_PARAM = "batchNum";
   public static final String RECON_QUERY_PREVKEY = "prevKey";
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
index 39d091ee03c..2d59e61c671 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
@@ -29,8 +29,13 @@
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.sql.Timestamp;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.time.Instant;
 import java.util.ArrayList;
+import java.util.Date;
 import java.util.List;
+import java.util.TimeZone;
 import java.util.concurrent.BlockingQueue;

 import com.google.common.base.Preconditions;
@@ -105,6 +110,30 @@ public static File getReconScmDbDir(ConfigurationSource conf) {
     return queues;
   }

+  /**
+   * Converts a date string in the given format to epoch milliseconds, interpreting it in the provided time zone.
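+   *
+   * A minimal usage sketch (the pattern and sample date mirror the values used by this patch's
+   * tests; they are illustrative only):
+   * <pre>{@code
+   * long millis = ReconUtils.convertToEpochMillis(
+   *     "04-04-2024 12:30:00", "MM-dd-yyyy HH:mm:ss", TimeZone.getDefault());
+   * }</pre>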
+   *
+   * @param dateString the date string to convert
+   * @param dateFormat the SimpleDateFormat pattern expected in dateString
+   * @param timeZone the time zone in which dateString should be interpreted
+   * @return the parsed instant as epoch milliseconds, or the current time if parsing fails
+   */
+  public static long convertToEpochMillis(String dateString, String dateFormat, TimeZone timeZone) {
+    try {
+      SimpleDateFormat sdf = new SimpleDateFormat(dateFormat);
+      sdf.setTimeZone(timeZone); // Interpret the date string in the caller-provided time zone
+      Date date = sdf.parse(dateString);
+      return date.getTime(); // Convert to epoch milliseconds
+    } catch (ParseException parseException) {
+      LOG.error("Date parse exception for date: {} in format: {} -> {}", dateString, dateFormat, parseException);
+      return Instant.now().toEpochMilli();
+    } catch (Exception exception) {
+      LOG.error("Unexpected error while parsing date: {} in format: {}", dateString, dateFormat, exception);
+      return Instant.now().toEpochMilli();
+    }
+  }
+
   /**
    * Get configured Recon DB directory value based on config. If not present,
    * fallback to ozone.metadata.dirs
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java
index 5b104c46115..d9d613993b7 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java
@@ -18,8 +18,10 @@

 package org.apache.hadoop.ozone.recon.api;

+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.recon.ReconUtils;
 import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler;
 import org.apache.hadoop.ozone.recon.api.types.NamespaceSummaryResponse;
 import org.apache.hadoop.ozone.recon.api.types.DUResponse;
@@ -29,6 +31,8 @@
 import org.apache.hadoop.ozone.recon.api.types.EntityType;
 import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
 import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 import javax.inject.Inject;
 import javax.ws.rs.DefaultValue;
@@ -39,6 +43,14 @@
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import java.io.IOException;
+import java.util.List;
+import java.util.TimeZone;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
+import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT;
+import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_KEY_SIZE;

 /**
  * REST APIs for namespace metadata summary.
@@ -47,6 +59,8 @@
 @Produces(MediaType.APPLICATION_JSON)
 @AdminOnly
 public class NSSummaryEndpoint {
+  private static final Logger LOG = LoggerFactory.getLogger(
+      NSSummaryEndpoint.class);

   private final ReconNamespaceSummaryManager reconNamespaceSummaryManager;

@@ -128,7 +142,94 @@ public Response getDiskUsage(@QueryParam("path") String path,
         omMetadataManager, reconSCM, path);

     duResponse = handler.getDuResponse(
-        listFile, withReplica);
+        listFile, withReplica, false);

     return Response.ok(duResponse).build();
   }

+  /**
+   * Lists at most 'count' keys after applying the filters passed as API parameters.
+   * Default values of the API parameter filters:
+   * -- replicationType - RATIS
+   * -- creationDate - empty string; the filter is not applied, so keys are listed irrespective of age.
+ * -- keySize - 0 bytes, which means all keys greater than zero bytes will be listed, effectively all. + * -- startPrefix - / + * -- count - 1000 + * + * @param replicationType Filter for RATIS or EC replication keys + * @param creationDate Filter for keys created after creationDate in "MM-dd-yyyy HH:mm:ss" string format. + * @param keySize Filter for Keys greater than keySize in bytes. + * @param startPrefix Filter for startPrefix path. + * @param count Filter for limited count of keys. + * @return the list of keys in below structured format: + * Response For OBS Bucket keys: + * ******************************************************** + * { + * "status": "OK", + * "path": "/volume1/obs-bucket/", + * "size": 73400320, + * "sizeWithReplica": 81788928, + * "subPathCount": 1, + * "subPaths": [ + * { + * "key": true, + * "path": "key7", + * "size": 10485760, + * "sizeWithReplica": 18874368, + * "isKey": true, + * "replicationType": "EC", + * "creationTime": 1712321367060, + * "modificationTime": 1712321368190 + * } + * ], + * "sizeDirectKey": 73400320 + * } + * ******************************************************** + * @throws IOException + */ + @GET + @Path("/listKeys") + @SuppressWarnings("methodlength") + public Response listKeysWithDu(@DefaultValue("RATIS") @QueryParam("replicationType") String replicationType, + @QueryParam("creationDate") String creationDate, + @DefaultValue(DEFAULT_KEY_SIZE) @QueryParam("keySize") long keySize, + @DefaultValue(OM_KEY_PREFIX) @QueryParam("startPrefix") String startPrefix, + @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam("count") long count, + @DefaultValue("false") @QueryParam("recursive") boolean recursive) + throws IOException { + + if (startPrefix == null || startPrefix.length() == 0) { + return Response.status(Response.Status.BAD_REQUEST).build(); + } + DUResponse duResponse = new DUResponse(); + if (!isInitializationComplete()) { + duResponse.setStatus(ResponseStatus.INITIALIZING); + return Response.ok(duResponse).build(); + } + EntityHandler handler = EntityHandler.getEntityHandler( + reconNamespaceSummaryManager, + omMetadataManager, reconSCM, startPrefix); + + duResponse = handler.getListKeysResponse(count, recursive); + + List keyListWithDu = duResponse.getDuData(); + + long epochMillis = ReconUtils.convertToEpochMillis(creationDate, "MM-dd-yyyy HH:mm:ss", TimeZone.getDefault()); + Predicate keyAgeFilter = keyData -> keyData.getCreationTime() >= epochMillis; + Predicate keyReplicationFilter = + keyData -> keyData.getReplicationType().equals(replicationType); + Predicate keySizeFilter = keyData -> keyData.getSize() > keySize; + Predicate keyFilter = keyData -> keyData.isKey(); + + List filteredKeyList = keyListWithDu.stream() + .filter(keyData -> !StringUtils.isEmpty(creationDate) ? keyAgeFilter.test(keyData) : true) + .filter(keyData -> keyData.getReplicationType() != null ? 
keyReplicationFilter.test(keyData) : true) + .filter(keySizeFilter) + .filter(keyFilter) + .collect(Collectors.toList()); + + duResponse.setDuData(filteredKeyList); + duResponse.setCount(filteredKeyList.size()); return Response.ok(duResponse).build(); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java index 7ad961195ee..563cf0b7704 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java @@ -30,6 +30,8 @@ import org.apache.hadoop.ozone.recon.api.types.ResponseStatus; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayList; @@ -40,6 +42,7 @@ * Class for handling bucket entity type. */ public class BucketEntityHandler extends EntityHandler { + private static final Logger LOG = LoggerFactory.getLogger(BucketEntityHandler.class); public BucketEntityHandler( ReconNamespaceSummaryManager reconNamespaceSummaryManager, ReconOMMetadataManager omMetadataManager, @@ -87,7 +90,7 @@ private BucketObjectDBInfo getBucketObjDbInfo(String[] names) @Override public DUResponse getDuResponse( - boolean listFile, boolean withReplica) + boolean listFile, boolean withReplica, boolean recursive) throws IOException { DUResponse duResponse = new DUResponse(); duResponse.setPath(getNormalizedPath()); @@ -109,6 +112,7 @@ public DUResponse getDuResponse( long bucketDataSize = duResponse.getKeySize(); long bucketDataSizeWithReplica = 0L; for (long subdirObjectId: bucketSubdirs) { + List diskUsageList = new ArrayList<>(); NSSummary subdirNSSummary = getReconNamespaceSummaryManager() .getNSSummary(subdirObjectId); @@ -124,12 +128,13 @@ public DUResponse getDuResponse( if (withReplica) { long dirDU = getBucketHandler() - .calculateDUUnderObject(subdirObjectId); + .calculateDUUnderObject(subdirObjectId, recursive, diskUsageList); diskUsage.setSizeWithReplica(dirDU); bucketDataSizeWithReplica += dirDU; } diskUsage.setSize(dataSize); dirDUData.add(diskUsage); + dirDUData.addAll(diskUsageList); } // Either listFile or withReplica is enabled, we need the directKeys info if (listFile || withReplica) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java index 266caaa2d8e..93c2a12d655 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java @@ -77,17 +77,17 @@ public ReconNamespaceSummaryManager getReconNamespaceSummaryManager() { public abstract EntityType determineKeyPath(String keyName) throws IOException; - public abstract long calculateDUUnderObject(long parentId) - throws IOException; - - public abstract long handleDirectKeys(long parentId, - boolean withReplica, boolean listFile, - List duData, - String normalizedPath) throws IOException; + public abstract long calculateDUUnderObject(long parentId, boolean recursive, + List diskUsageList) throws IOException; public abstract long getDirObjectId(String[] 
names) throws IOException; + public abstract long handleDirectKeys(long parentId, + boolean withReplica, boolean listFile, + List duData, + String normalizedPath) throws IOException; + public abstract long getDirObjectId(String[] names, int cutoff) throws IOException; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java index fc7022e2dab..75001ba6e68 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java @@ -80,7 +80,7 @@ private ObjectDBInfo getDirectoryObjDbInfo(String[] names) @Override public DUResponse getDuResponse( - boolean listFile, boolean withReplica) + boolean listFile, boolean withReplica, boolean recursive) throws IOException { DUResponse duResponse = new DUResponse(); duResponse.setPath(getNormalizedPath()); @@ -103,6 +103,7 @@ public DUResponse getDuResponse( List subdirDUData = new ArrayList<>(); // iterate all subdirectories to get disk usage data for (long subdirObjectId: subdirs) { + List diskUsageList = new ArrayList<>(); NSSummary subdirNSSummary = getReconNamespaceSummaryManager().getNSSummary(subdirObjectId); // for the subdirName we need the subdir filename, not the key name @@ -133,13 +134,15 @@ public DUResponse getDuResponse( if (withReplica) { long subdirDU = getBucketHandler() - .calculateDUUnderObject(subdirObjectId); + .calculateDUUnderObject(subdirObjectId, recursive, diskUsageList); diskUsage.setSizeWithReplica(subdirDU); dirDataSizeWithReplica += subdirDU; } - diskUsage.setSize(dataSize); subdirDUData.add(diskUsage); + if (recursive) { + subdirDUData.addAll(diskUsageList); + } } // handle direct keys under directory diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java index 4f9e68ddff9..c434208f865 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java @@ -79,9 +79,13 @@ public abstract NamespaceSummaryResponse getSummaryResponse() throws IOException; public abstract DUResponse getDuResponse( - boolean listFile, boolean withReplica) + boolean listFile, boolean withReplica, boolean recursive) throws IOException; + public DUResponse getListKeysResponse(long count, boolean recursive) throws IOException { + return getDuResponse(true, true, recursive); + } + public abstract QuotaUsageResponse getQuotaResponse() throws IOException; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java index 8a1c5babe75..ac0a0802c37 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.OmUtils; import 
org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; @@ -30,8 +31,6 @@ import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.io.IOException; import java.nio.file.Paths; @@ -45,11 +44,9 @@ * Class for handling FSO buckets NameSpaceSummaries. */ public class FSOBucketHandler extends BucketHandler { - private static final Logger LOG = - LoggerFactory.getLogger(FSOBucketHandler.class); private final long volumeId; private final long bucketId; - + public FSOBucketHandler( ReconNamespaceSummaryManager reconNamespaceSummaryManager, ReconOMMetadataManager omMetadataManager, @@ -75,7 +72,7 @@ public FSOBucketHandler( */ @Override public EntityType determineKeyPath(String keyName) - throws IOException { + throws IOException { java.nio.file.Path keyPath = Paths.get(keyName); Iterator elements = keyPath.iterator(); @@ -92,14 +89,14 @@ public EntityType determineKeyPath(String keyName) String dbNodeName = getOmMetadataManager().getOzonePathKey(volumeId, bucketId, lastKnownParentId, fileName); omDirInfo = getOmMetadataManager().getDirectoryTable() - .getSkipCache(dbNodeName); + .getSkipCache(dbNodeName); if (omDirInfo != null) { lastKnownParentId = omDirInfo.getObjectID(); } else if (!elements.hasNext()) { // reached last path component. Check file exists for the given path. OmKeyInfo omKeyInfo = getOmMetadataManager().getFileTable() - .getSkipCache(dbNodeName); + .getSkipCache(dbNodeName); // The path exists as a file if (omKeyInfo != null) { omKeyInfo.setKeyName(keyName); @@ -121,13 +118,13 @@ public EntityType determineKeyPath(String keyName) // FileTable's key is in the format of "volumeId/bucketId/parentId/fileName" // Make use of RocksDB's order to seek to the prefix and avoid full iteration @Override - public long calculateDUUnderObject(long parentId) + public long calculateDUUnderObject(long parentId, boolean recursive, List diskUsageList) throws IOException { Table keyTable = getOmMetadataManager().getFileTable(); long totalDU = 0L; try (TableIterator> - iterator = keyTable.iterator()) { + iterator = keyTable.iterator()) { String seekPrefix = OM_KEY_PREFIX + volumeId + @@ -146,6 +143,9 @@ public long calculateDUUnderObject(long parentId) break; } OmKeyInfo keyInfo = kv.getValue(); + if (recursive) { + populateDiskUsage(keyInfo, diskUsageList); + } if (keyInfo != null) { totalDU += keyInfo.getReplicatedSize(); } @@ -154,7 +154,7 @@ public long calculateDUUnderObject(long parentId) // handle nested keys (DFS) NSSummary nsSummary = getReconNamespaceSummaryManager() - .getNSSummary(parentId); + .getNSSummary(parentId); // empty bucket if (nsSummary == null) { return 0; @@ -162,11 +162,64 @@ public long calculateDUUnderObject(long parentId) Set subDirIds = nsSummary.getChildDir(); for (long subDirId: subDirIds) { - totalDU += calculateDUUnderObject(subDirId); + totalDU += calculateDUUnderObject(subDirId, recursive, diskUsageList); } return totalDU; } + private void populateDiskUsage(OmKeyInfo keyInfo, List diskUsageList) throws IOException { + DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage(); + diskUsage.setKey(keyInfo.isFile()); + diskUsage.setSubpath(constructFullPath(keyInfo, getReconNamespaceSummaryManager())); + diskUsage.setSize(keyInfo.getDataSize()); + 
diskUsage.setSizeWithReplica(keyInfo.getReplicatedSize()); + diskUsage.setReplicationType(keyInfo.getReplicationConfig().getReplicationType().name()); + diskUsage.setCreationTime(keyInfo.getCreationTime()); + diskUsage.setModificationTime(keyInfo.getModificationTime()); + + diskUsageList.add(diskUsage); + } + + /** + * Constructs the full path of a key from its OmKeyInfo using a bottom-up approach, starting from the leaf node. + *
+   * <p>
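+   * For example, with the fixtures from this patch's FSO test: a file "file2" whose parent chain is
+   * dir2 -> dir1 under volume "vol" and bucket "bucket1" resolves to "vol/bucket1/dir1/dir2/file2".
+   * <p>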
+ * The method begins with the leaf node (the key itself) and recursively prepends parent directory names, fetched + * via NSSummary objects, until reaching the parent bucket (parentId is -1). It effectively builds the path from + * bottom to top, finally prepending the volume and bucket names to complete the full path. + * + * @param omKeyInfo The OmKeyInfo object for the key + * @return The constructed full path of the key as a String. + * @throws IOException + */ + public static String constructFullPath(OmKeyInfo omKeyInfo, + ReconNamespaceSummaryManager reconNamespaceSummaryManager) + throws IOException { + StringBuilder fullPath = new StringBuilder(omKeyInfo.getKeyName()); + long parentId = omKeyInfo.getParentObjectID(); + boolean isDirectoryPresent = false; + while (parentId != -1) { + NSSummary nsSummary = reconNamespaceSummaryManager.getNSSummary(parentId); + if (nsSummary == null) { + break; + } + // Prepend the directory name to the path + fullPath.insert(0, nsSummary.getDirName() + OM_KEY_PREFIX); + + // Move to the parent ID of the current directory + parentId = nsSummary.getParentId(); + isDirectoryPresent = true; + } + + // Prepend the volume and bucket to the constructed path + String volumeName = omKeyInfo.getVolumeName(); + String bucketName = omKeyInfo.getBucketName(); + fullPath.insert(0, volumeName + OM_KEY_PREFIX + bucketName + OM_KEY_PREFIX); + if (isDirectoryPresent) { + return OmUtils.normalizeKey(fullPath.toString(), true); + } + return fullPath.toString(); + } + /** * This method handles disk usage of direct keys. * @param parentId parent directory/bucket @@ -188,7 +241,7 @@ public long handleDirectKeys(long parentId, boolean withReplica, long keyDataSizeWithReplica = 0L; try (TableIterator> - iterator = keyTable.iterator()) { + iterator = keyTable.iterator()) { String seekPrefix = OM_KEY_PREFIX + volumeId + @@ -214,6 +267,9 @@ public long handleDirectKeys(long parentId, boolean withReplica, diskUsage.setSubpath(subpath); diskUsage.setKey(true); diskUsage.setSize(keyInfo.getDataSize()); + diskUsage.setReplicationType(keyInfo.getReplicationConfig().getReplicationType().name()); + diskUsage.setCreationTime(keyInfo.getCreationTime()); + diskUsage.setModificationTime(keyInfo.getModificationTime()); if (withReplica) { long keyDU = keyInfo.getReplicatedSize(); @@ -257,9 +313,9 @@ public long getDirObjectId(String[] names, int cutoff) throws IOException { String dirKey; for (int i = 2; i < cutoff; ++i) { dirKey = getOmMetadataManager().getOzonePathKey(getVolumeObjectId(names), - getBucketObjectId(names), dirObjectId, names[i]); + getBucketObjectId(names), dirObjectId, names[i]); OmDirectoryInfo dirInfo = - getOmMetadataManager().getDirectoryTable().getSkipCache(dirKey); + getOmMetadataManager().getDirectoryTable().getSkipCache(dirKey); if (null != dirInfo) { dirObjectId = dirInfo.getObjectID(); } @@ -279,7 +335,7 @@ public OmKeyInfo getKeyInfo(String[] names) throws IOException { String fileName = names[names.length - 1]; String ozoneKey = getOmMetadataManager().getOzonePathKey(volumeId, bucketId, - parentObjectId, fileName); + parentObjectId, fileName); return getOmMetadataManager().getFileTable().getSkipCache(ozoneKey); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/KeyEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/KeyEntityHandler.java index a687bf3d0bd..1ee9bca3a2c 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/KeyEntityHandler.java 
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/KeyEntityHandler.java @@ -71,7 +71,7 @@ private ObjectDBInfo getKeyDbObjectInfo(String[] names) @Override public DUResponse getDuResponse( - boolean listFile, boolean withReplica) + boolean listFile, boolean withReplica, boolean recursive) throws IOException { DUResponse duResponse = new DUResponse(); duResponse.setPath(getNormalizedPath()); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java index 09f1c5bc745..4f4144211bd 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java @@ -110,12 +110,15 @@ public EntityType determineKeyPath(String keyName) * Make use of RocksDB's order to seek to the prefix and avoid full iteration. * Calculating DU only for keys. Skipping any directories and * handling only direct keys. + * * @param parentId + * @param recursive + * @param diskUsageList * @return total DU of direct keys under object * @throws IOException */ @Override - public long calculateDUUnderObject(long parentId) + public long calculateDUUnderObject(long parentId, boolean recursive, List diskUsageList) throws IOException { Table keyTable = getKeyTable(); @@ -174,7 +177,7 @@ public long calculateDUUnderObject(long parentId) // handle nested keys (DFS) Set subDirIds = nsSummary.getChildDir(); for (long subDirId: subDirIds) { - totalDU += calculateDUUnderObject(subDirId); + totalDU += calculateDUUnderObject(subDirId, recursive, diskUsageList); } return totalDU; } @@ -250,6 +253,9 @@ public long handleDirectKeys(long parentId, boolean withReplica, diskUsage.setSubpath(subpath); diskUsage.setKey(true); diskUsage.setSize(keyInfo.getDataSize()); + diskUsage.setReplicationType(keyInfo.getReplicationConfig().getReplicationType().name()); + diskUsage.setCreationTime(keyInfo.getCreationTime()); + diskUsage.setModificationTime(keyInfo.getModificationTime()); if (withReplica) { long keyDU = keyInfo.getReplicatedSize(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java index 024eec989a1..76cbc3365e4 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java @@ -145,6 +145,9 @@ public long handleDirectKeys(long parentId, boolean withReplica, diskUsage.setSubpath(objectName); diskUsage.setKey(true); diskUsage.setSize(keyInfo.getDataSize()); + diskUsage.setReplicationType(keyInfo.getReplicationConfig().getReplicationType().name()); + diskUsage.setCreationTime(keyInfo.getCreationTime()); + diskUsage.setModificationTime(keyInfo.getModificationTime()); if (withReplica) { long keyDU = keyInfo.getReplicatedSize(); @@ -168,12 +171,15 @@ public long handleDirectKeys(long parentId, boolean withReplica, * Since OBS buckets operate on a flat hierarchy, this method iterates through * all the keys in the bucket without the need to traverse directories. * - * @param parentId The identifier for the parent bucket. + * @param parentId The identifier for the parent bucket. 
+ * @param recursive + * @param diskUsageList * @return The total disk usage of all keys within the specified OBS bucket. * @throws IOException */ @Override - public long calculateDUUnderObject(long parentId) throws IOException { + public long calculateDUUnderObject(long parentId, boolean recursive, List diskUsageList) + throws IOException { // Initialize the total disk usage variable. long totalDU = 0L; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/RootEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/RootEntityHandler.java index fd0e58f191a..83353c1759e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/RootEntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/RootEntityHandler.java @@ -88,7 +88,7 @@ private ObjectDBInfo getPrefixObjDbInfo() @Override public DUResponse getDuResponse( - boolean listFile, boolean withReplica) + boolean listFile, boolean withReplica, boolean recursive) throws IOException { DUResponse duResponse = new DUResponse(); duResponse.setPath(getNormalizedPath()); @@ -100,6 +100,7 @@ public DUResponse getDuResponse( long totalDataSize = 0L; long totalDataSizeWithReplica = 0L; for (OmVolumeArgs volume: volumes) { + List diskUsageList = new ArrayList<>(); String volumeName = volume.getVolume(); String subpath = omMetadataManager.getVolumeKey(volumeName); DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage(); @@ -119,7 +120,7 @@ public DUResponse getDuResponse( BucketHandler.getBucketHandler( getReconNamespaceSummaryManager(), getOmMetadataManager(), getReconSCM(), bucket); - volumeDU += bucketHandler.calculateDUUnderObject(bucketObjectID); + volumeDU += bucketHandler.calculateDUUnderObject(bucketObjectID, recursive, diskUsageList); } } totalDataSize += dataSize; @@ -132,6 +133,7 @@ public DUResponse getDuResponse( } diskUsage.setSize(dataSize); volumeDuData.add(diskUsage); + volumeDuData.addAll(diskUsageList); } if (withReplica) { duResponse.setSizeWithReplica(totalDataSizeWithReplica); @@ -148,7 +150,7 @@ public QuotaUsageResponse getQuotaResponse() QuotaUsageResponse quotaUsageResponse = new QuotaUsageResponse(); SCMNodeStat stats = getReconSCM().getScmNodeManager().getStats(); long quotaInBytes = stats.getCapacity().get(); - long quotaUsedInBytes = getDuResponse(true, true).getSizeWithReplica(); + long quotaUsedInBytes = getDuResponse(true, true, false).getSizeWithReplica(); quotaUsageResponse.setQuota(quotaInBytes); quotaUsageResponse.setQuotaUsed(quotaUsedInBytes); return quotaUsageResponse; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/UnknownEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/UnknownEntityHandler.java index b5a5bd9a0be..307e160fed0 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/UnknownEntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/UnknownEntityHandler.java @@ -51,7 +51,7 @@ public NamespaceSummaryResponse getSummaryResponse() @Override public DUResponse getDuResponse( - boolean listFile, boolean withReplica) + boolean listFile, boolean withReplica, boolean recursive) throws IOException { DUResponse duResponse = new DUResponse(); duResponse.setStatus(ResponseStatus.PATH_NOT_FOUND); diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java index fae508a99c9..f5cb4f6f6b9 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java @@ -92,7 +92,7 @@ private VolumeObjectDBInfo getVolumeObjDbInfo(String[] names) @Override public DUResponse getDuResponse( - boolean listFile, boolean withReplica) + boolean listFile, boolean withReplica, boolean recursive) throws IOException { DUResponse duResponse = new DUResponse(); duResponse.setPath(getNormalizedPath()); @@ -107,6 +107,7 @@ public DUResponse getDuResponse( long volDataSize = 0L; long volDataSizeWithReplica = 0L; for (OmBucketInfo bucket: buckets) { + List diskUsageList = new ArrayList<>(); String bucketName = bucket.getBucketName(); long bucketObjectID = bucket.getObjectID(); String subpath = getOmMetadataManager().getBucketKey(volName, bucketName); @@ -120,12 +121,13 @@ public DUResponse getDuResponse( getReconNamespaceSummaryManager(), getOmMetadataManager(), getReconSCM(), bucket); long bucketDU = bucketHandler - .calculateDUUnderObject(bucketObjectID); + .calculateDUUnderObject(bucketObjectID, recursive, diskUsageList); diskUsage.setSizeWithReplica(bucketDU); volDataSizeWithReplica += bucketDU; } diskUsage.setSize(dataSize); bucketDuData.add(diskUsage); + bucketDuData.addAll(diskUsageList); } if (withReplica) { duResponse.setSizeWithReplica(volDataSizeWithReplica); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DUResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DUResponse.java index b28d9d39c21..dff5bd3a14e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DUResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DUResponse.java @@ -138,6 +138,18 @@ public static class DiskUsage { @JsonProperty("isKey") private boolean isKey; + /** Indicate if the key replication type RATIS or EC. */ + @JsonProperty("replicationType") + private String replicationType; + + /** key creation time. */ + @JsonProperty("creationTime") + private long creationTime; + + /** key modification time. 
*/ + @JsonProperty("modificationTime") + private long modificationTime; + public DiskUsage() { this.sizeWithReplica = -1L; this.isKey = false; @@ -174,5 +186,29 @@ public void setKey(boolean key) { public boolean isKey() { return isKey; } + + public String getReplicationType() { + return replicationType; + } + + public void setReplicationType(String replicationType) { + this.replicationType = replicationType; + } + + public long getCreationTime() { + return creationTime; + } + + public void setCreationTime(long creationTime) { + this.creationTime = creationTime; + } + + public long getModificationTime() { + return modificationTime; + } + + public void setModificationTime(long modificationTime) { + this.modificationTime = modificationTime; + } } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java index c0f93aebe97..fa16d03abe4 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java @@ -36,22 +36,24 @@ public class NSSummary { private int[] fileSizeBucket; private Set childDir; private String dirName; + private long parentId = -1; public NSSummary() { this(0, 0L, new int[ReconConstants.NUM_OF_FILE_SIZE_BINS], - new HashSet<>(), ""); + new HashSet<>(), "", -1); // -1 can be a default value indicating no parent } public NSSummary(int numOfFiles, long sizeOfFiles, int[] bucket, Set childDir, - String dirName) { + String dirName, long parentId) { this.numOfFiles = numOfFiles; this.sizeOfFiles = sizeOfFiles; setFileSizeBucket(bucket); this.childDir = childDir; this.dirName = dirName; + this.parentId = parentId; } public int getNumOfFiles() { @@ -95,6 +97,14 @@ public void setDirName(String dirName) { this.dirName = removeTrailingSlashIfNeeded(dirName); } + public long getParentId() { + return parentId; + } + + public void setParentId(long parentId) { + this.parentId = parentId; + } + public void addChildDir(long childId) { if (this.childDir.contains(childId)) { return; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java index 09e0b258793..c87f9a6e5d5 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java @@ -65,9 +65,10 @@ public byte[] toPersistedFormat(NSSummary object) throws IOException { int stringLen = dirName.getBytes(StandardCharsets.UTF_8).length; int numOfChildDirs = childDirs.size(); final int resSize = NUM_OF_INTS * Integer.BYTES - + (numOfChildDirs + 1) * Long.BYTES // 1 long field + list size + + (numOfChildDirs + 1) * Long.BYTES // 1 long field for parentId + list size + Short.BYTES // 2 dummy shorts to track length - + stringLen; // directory name length + + stringLen // directory name length + + Long.BYTES; // Added space for parentId serialization ByteArrayOutputStream out = new ByteArrayOutputStream(resSize); out.write(integerCodec.toPersistedFormat(object.getNumOfFiles())); @@ -84,6 +85,7 @@ public byte[] toPersistedFormat(NSSummary object) throws IOException { } out.write(integerCodec.toPersistedFormat(stringLen)); out.write(stringCodec.toPersistedFormat(dirName)); + 
out.write(longCodec.toPersistedFormat(object.getParentId())); return out.toByteArray(); } @@ -110,6 +112,8 @@ public NSSummary fromPersistedFormat(byte[] rawData) throws IOException { int strLen = in.readInt(); if (strLen == 0) { + long parentId = in.readLong(); // Deserialize parentId + res.setParentId(parentId); return res; } byte[] buffer = new byte[strLen]; @@ -117,6 +121,8 @@ public NSSummary fromPersistedFormat(byte[] rawData) throws IOException { assert (bytesRead == strLen); String dirName = stringCodec.fromPersistedFormat(buffer); res.setDirName(dirName); + long parentId = in.readLong(); + res.setParentId(parentId); return res; } @@ -128,6 +134,7 @@ public NSSummary copyObject(NSSummary object) { copy.setFileSizeBucket(object.getFileSizeBucket()); copy.setChildDir(object.getChildDir()); copy.setDirName(object.getDirName()); + copy.setParentId(object.getParentId()); return copy; } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java index 2f3de1debcd..57f7686263f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java @@ -71,7 +71,7 @@ private long getEntitySize(String path) throws IOException { EntityHandler.getEntityHandler(reconNamespaceSummaryManager, omMetadataManager, reconSCM, path); if (null != entityHandler) { - DUResponse duResponse = entityHandler.getDuResponse(false, false); + DUResponse duResponse = entityHandler.getDuResponse(false, false, false); if (null != duResponse && duResponse.getStatus() == ResponseStatus.OK) { return duResponse.getSize(); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java index f00d83e64a5..b979307019f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java @@ -132,6 +132,10 @@ protected void handlePutDirEvent(OmDirectoryInfo directoryInfo, curNSSummary = new NSSummary(); } curNSSummary.setDirName(dirName); + // Set the parent directory ID + if (parentObjectId != -1) { + curNSSummary.setParentId(parentObjectId); + } nsSummaryMap.put(objectId, curNSSummary); // Write the child dir list to the parent directory diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java index a9ed342faad..1f50ca6d06d 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -208,13 +209,9 @@ public static void writeKeyToOm(OMMetadataManager omMetadataManager, throws IOException { // DB key in FileTable => 
"volumeId/bucketId/parentId/fileName" // DB key in KeyTable => "/volume/bucket/key" - String omKey; - if (bucketLayout.equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) { - omKey = omMetadataManager.getOzonePathKey(volumeObjectId, - bucketObjectId, parentObjectId, fileName); - } else { - omKey = omMetadataManager.getOzoneKey(volume, bucket, key); - } + String omKey = + getKey(omMetadataManager, key, bucket, volume, fileName, parentObjectId, bucketObjectId, volumeObjectId, + bucketLayout); omMetadataManager.getKeyTable(bucketLayout).put(omKey, new OmKeyInfo.Builder() .setBucketName(bucket) @@ -228,6 +225,20 @@ public static void writeKeyToOm(OMMetadataManager omMetadataManager, .build()); } + @SuppressWarnings("checkstyle:ParameterNumber") + private static String getKey(OMMetadataManager omMetadataManager, String key, String bucket, String volume, + String fileName, long parentObjectId, long bucketObjectId, long volumeObjectId, + BucketLayout bucketLayout) { + String omKey; + if (bucketLayout.equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) { + omKey = omMetadataManager.getOzonePathKey(volumeObjectId, + bucketObjectId, parentObjectId, fileName); + } else { + omKey = omMetadataManager.getOzoneKey(volume, bucket, key); + } + return omKey; + } + @SuppressWarnings("checkstyle:parameternumber") public static void writeKeyToOm(OMMetadataManager omMetadataManager, String keyName, @@ -243,13 +254,10 @@ public static void writeKeyToOm(OMMetadataManager omMetadataManager, long dataSize) throws IOException { - String omKey; - if (bucketLayout.equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) { - omKey = omMetadataManager.getOzonePathKey(volumeObjectId, - bucketObjectId, parentObjectId, fileName); - } else { - omKey = omMetadataManager.getOzoneKey(volName, bucketName, keyName); - } + String omKey = + getKey(omMetadataManager, keyName, bucketName, volName, fileName, parentObjectId, bucketObjectId, + volumeObjectId, + bucketLayout); omMetadataManager.getKeyTable(bucketLayout).put(omKey, new OmKeyInfo.Builder() .setBucketName(bucketName) @@ -264,6 +272,42 @@ public static void writeKeyToOm(OMMetadataManager omMetadataManager, .build()); } + /** + * Write a key on OM instance. + * @throw IOException while writing. + */ + @SuppressWarnings("checkstyle:parameternumber") + public static void writeKeyToOm(OMMetadataManager omMetadataManager, + String key, + String bucket, + String volume, + String fileName, + long objectID, + long parentObjectId, + long bucketObjectId, + long volumeObjectId, + long dataSize, + BucketLayout bucketLayout, + ReplicationConfig replicationConfig, + long creationTime, boolean isFile) + throws IOException { + String omKey = + getKey(omMetadataManager, key, bucket, volume, fileName, parentObjectId, bucketObjectId, volumeObjectId, + bucketLayout); + omMetadataManager.getKeyTable(bucketLayout).put(omKey, + new OmKeyInfo.Builder() + .setBucketName(bucket) + .setVolumeName(volume) + .setKeyName(key) + .setFile(isFile) + .setReplicationConfig(replicationConfig) + .setCreationTime(creationTime) + .setObjectID(objectID) + .setParentObjectID(parentObjectId) + .setDataSize(dataSize) + .build()); + } + /** * Write an open key to OM instance optimized for File System. 
* diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java index cbe850b918f..c8fe2aebb6f 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java @@ -19,12 +19,16 @@ package org.apache.hadoop.ozone.recon.api; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.om.helpers.QuotaUtil.getReplicatedSize; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerManager; @@ -42,6 +46,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.recon.ReconConstants; import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler; import org.apache.hadoop.ozone.recon.api.types.DUResponse; @@ -73,6 +78,7 @@ import java.util.ArrayList; import java.util.Set; import java.util.HashSet; +import java.util.TimeZone; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertArrayEquals; @@ -118,6 +124,16 @@ public class TestNSSummaryEndpointWithFSO { private OzoneConfiguration ozoneConfiguration; private CommonUtils commonUtils; + private static int chunkSize = 1024 * 1024; + + private ReplicationConfig ratisThree = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE); + private ReplicationConfig ecType = new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, chunkSize); + private long epochMillis1 = + ReconUtils.convertToEpochMillis("04-04-2024 12:30:00", "MM-dd-yyyy HH:mm:ss", TimeZone.getDefault()); + private long epochMillis2 = + ReconUtils.convertToEpochMillis("04-05-2024 12:30:00", "MM-dd-yyyy HH:mm:ss", TimeZone.getDefault()); + private static final String TEST_PATH_UTILITY = "/vol1/buck1/a/b/c/d/e/file1.txt"; private static final String PARENT_DIR = "vol1/buck1/a/b/c/d/e"; @@ -132,6 +148,7 @@ public class TestNSSummaryEndpointWithFSO { private static final String BUCKET_TWO = "bucket2"; private static final String BUCKET_THREE = "bucket3"; private static final String BUCKET_FOUR = "bucket4"; + private static final String BUCKET_FIVE = "bucket5"; private static final String KEY_ONE = "file1"; private static final String KEY_TWO = "dir1/dir2/file2"; private static final String KEY_THREE = "dir1/dir3/file3"; @@ -143,6 +160,12 @@ public class TestNSSummaryEndpointWithFSO { private static final String KEY_NINE = "dir5/file9"; private static final String KEY_TEN = 
"dir5/file10"; private static final String KEY_ELEVEN = "file11"; + private static final String KEY_TWELVE = "dir6/file12"; + private static final String KEY_THIRTEEN = "dir6/file13"; + private static final String KEY_FOURTEEN = "dir6/dir7/file14"; + private static final String KEY_FIFTEEN = "dir6/dir7/file15"; + private static final String KEY_SIXTEEN = "dir6/dir7/dir8/file16"; + private static final String KEY_SEVENTEEN = "dir6/dir7/dir8/file17"; private static final String MULTI_BLOCK_KEY = "dir1/file7"; private static final String MULTI_BLOCK_FILE = "file7"; @@ -157,12 +180,21 @@ public class TestNSSummaryEndpointWithFSO { private static final String FILE_NINE = "file9"; private static final String FILE_TEN = "file10"; private static final String FILE_ELEVEN = "file11"; + private static final String FILE_TWELVE = "file12"; + private static final String FILE_THIRTEEN = "file13"; + private static final String FILE_FOURTEEN = "file14"; + private static final String FILE_FIFTEEN = "file15"; + private static final String FILE_SIXTEEN = "file16"; + private static final String FILE_SEVENTEEN = "file17"; private static final String DIR_ONE = "dir1"; private static final String DIR_TWO = "dir2"; private static final String DIR_THREE = "dir3"; private static final String DIR_FOUR = "dir4"; private static final String DIR_FIVE = "dir5"; + private static final String DIR_SIX = "dir6"; + private static final String DIR_SEVEN = "dir7"; + private static final String DIR_EIGHT = "dir8"; // objects IDs private static final long VOL_OBJECT_ID = 0L; private static final long BUCKET_ONE_OBJECT_ID = 1L; @@ -179,6 +211,7 @@ public class TestNSSummaryEndpointWithFSO { private static final long DIR_FOUR_OBJECT_ID = 12L; private static final long MULTI_BLOCK_KEY_OBJECT_ID = 13L; private static final long KEY_SEVEN_OBJECT_ID = 13L; + private static final long VOL_TWO_OBJECT_ID = 14L; private static final long BUCKET_THREE_OBJECT_ID = 15L; private static final long BUCKET_FOUR_OBJECT_ID = 16L; @@ -188,6 +221,17 @@ public class TestNSSummaryEndpointWithFSO { private static final long KEY_TEN_OBJECT_ID = 20L; private static final long KEY_ELEVEN_OBJECT_ID = 21L; + private static final long BUCKET_FIVE_OBJECT_ID = 22L; + private static final long DIR_SIX_OBJECT_ID = 23L; + private static final long KEY_TWELVE_OBJECT_ID = 24L; + private static final long KEY_THIRTEEN_OBJECT_ID = 25L; + private static final long DIR_SEVEN_OBJECT_ID = 26L; + private static final long KEY_FOURTEEN_OBJECT_ID = 27L; + private static final long KEY_FIFTEEN_OBJECT_ID = 28L; + private static final long DIR_EIGHT_OBJECT_ID = 29L; + private static final long KEY_SIXTEEN_OBJECT_ID = 30L; + private static final long KEY_SEVENTEEN_OBJECT_ID = 31L; + // container IDs private static final long CONTAINER_ONE_ID = 1L; private static final long CONTAINER_TWO_ID = 2L; @@ -225,6 +269,13 @@ public class TestNSSummaryEndpointWithFSO { private static final long KEY_TEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 private static final long KEY_ELEVEN_SIZE = OzoneConsts.KB + 1; // bin 1 + private static final long KEY_TWELVE_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long KEY_THIRTEEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long KEY_FOURTEEN_SIZE = OzoneConsts.KB + 1; // bin 1 + private static final long KEY_FIFTEEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long KEY_SIXTEEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long KEY_SEVENTEEN_SIZE = OzoneConsts.KB + 1; // bin 1 + private 
static final long FILE1_SIZE_WITH_REPLICA = getReplicatedSize(KEY_ONE_SIZE, StandaloneReplicationConfig.getInstance(ONE)); @@ -315,6 +366,7 @@ public class TestNSSummaryEndpointWithFSO { private static final long BUCKET_TWO_QUOTA = OzoneConsts.MB; private static final long BUCKET_THREE_QUOTA = OzoneConsts.MB; private static final long BUCKET_FOUR_QUOTA = OzoneConsts.MB; + private static final long BUCKET_FIVE_QUOTA = OzoneConsts.MB; // mock client's path requests private static final String TEST_USER = "TestUser"; @@ -323,10 +375,14 @@ public class TestNSSummaryEndpointWithFSO { private static final String VOL_TWO_PATH = "/vol2"; private static final String BUCKET_ONE_PATH = "/vol/bucket1"; private static final String BUCKET_TWO_PATH = "/vol/bucket2"; + private static final String BUCKET_FIVE_PATH = "/vol2/bucket5"; private static final String DIR_ONE_PATH = "/vol/bucket1/dir1"; private static final String DIR_TWO_PATH = "/vol/bucket1/dir1/dir2"; private static final String DIR_THREE_PATH = "/vol/bucket1/dir1/dir3"; private static final String DIR_FOUR_PATH = "/vol/bucket1/dir1/dir4"; + private static final String DIR_SIX_PATH = "/vol2/bucket5/dir6"; + private static final String DIR_SEVEN_PATH = "/vol2/bucket5/dir6/dir7"; + private static final String DIR_EIGHT_PATH = "/vol2/bucket5/dir6/dir7/dir8"; private static final String KEY_PATH = "/vol/bucket2/file4"; private static final String MULTI_BLOCK_KEY_PATH = "/vol/bucket1/dir1/file7"; private static final String INVALID_PATH = "/vol/path/not/found"; @@ -667,6 +723,17 @@ public void testQuotaUsage() throws Exception { invalidResObj.getResponseCode()); } + @Test + public void testListKeysBucketFive() throws Exception { + // bucket level DU + Response bucketResponse = nsSummaryEndpoint.listKeysWithDu("RATIS", "", 0, BUCKET_FIVE_PATH, + 1000, true); + DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity(); + assertEquals(3, duBucketResponse.getCount()); + DUResponse.DiskUsage duDir1 = duBucketResponse.getDuData().get(0); + assertEquals(DIR_SIX_PATH.substring(1) + OM_KEY_PREFIX + FILE_TWELVE, duDir1.getSubpath()); + assertEquals("RATIS", duDir1.getReplicationType()); + } @Test public void testFileSizeDist() throws Exception { @@ -695,6 +762,7 @@ public void checkFileSizeDist(String path, int bin0, * Write directories and keys info into OM DB. 
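   * The new bucket5 subtree (dir6/dir7/dir8 holding file12 through file17, written with alternating
   * RATIS/THREE and EC replication at two distinct creation times) is what testListKeysBucketFive
   * walks recursively.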
* @throws Exception */ + @SuppressWarnings("checkstyle:methodlength") private void populateOMDB() throws Exception { // write all directories writeDirToOm(reconOMMetadataManager, DIR_ONE_OBJECT_ID, @@ -712,6 +780,15 @@ private void populateOMDB() throws Exception { writeDirToOm(reconOMMetadataManager, DIR_FIVE_OBJECT_ID, BUCKET_THREE_OBJECT_ID, BUCKET_THREE_OBJECT_ID, VOL_TWO_OBJECT_ID, DIR_FIVE); + writeDirToOm(reconOMMetadataManager, DIR_SIX_OBJECT_ID, + BUCKET_FIVE_OBJECT_ID, BUCKET_FIVE_OBJECT_ID, + VOL_TWO_OBJECT_ID, DIR_SIX); + writeDirToOm(reconOMMetadataManager, DIR_SEVEN_OBJECT_ID, + DIR_SIX_OBJECT_ID, BUCKET_FIVE_OBJECT_ID, + VOL_TWO_OBJECT_ID, DIR_SEVEN); + writeDirToOm(reconOMMetadataManager, DIR_EIGHT_OBJECT_ID, + DIR_SEVEN_OBJECT_ID, BUCKET_FIVE_OBJECT_ID, + VOL_TWO_OBJECT_ID, DIR_EIGHT); // write all keys writeKeyToOm(reconOMMetadataManager, @@ -824,6 +901,87 @@ private void populateOMDB() throws Exception { VOL_TWO_OBJECT_ID, KEY_ELEVEN_SIZE, getBucketLayout()); + + writeKeyToOm(reconOMMetadataManager, + FILE_TWELVE, + BUCKET_FIVE, + VOL_TWO, + FILE_TWELVE, + KEY_TWELVE_OBJECT_ID, + DIR_SIX_OBJECT_ID, + BUCKET_FIVE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + KEY_TWELVE_SIZE, + getBucketLayout(), + ratisThree, + epochMillis1, true); + writeKeyToOm(reconOMMetadataManager, + FILE_THIRTEEN, + BUCKET_FIVE, + VOL_TWO, + FILE_THIRTEEN, + KEY_THIRTEEN_OBJECT_ID, + DIR_SIX_OBJECT_ID, + BUCKET_FIVE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + KEY_THIRTEEN_SIZE, + getBucketLayout(), + ecType, + epochMillis2, true); + + writeKeyToOm(reconOMMetadataManager, + FILE_FOURTEEN, + BUCKET_FIVE, + VOL_TWO, + FILE_FOURTEEN, + KEY_FOURTEEN_OBJECT_ID, + DIR_SEVEN_OBJECT_ID, + BUCKET_FIVE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + KEY_FOURTEEN_SIZE, + getBucketLayout(), + ratisThree, + epochMillis1, true); + writeKeyToOm(reconOMMetadataManager, + FILE_FIFTEEN, + BUCKET_FIVE, + VOL_TWO, + FILE_FIFTEEN, + KEY_FIFTEEN_OBJECT_ID, + DIR_SEVEN_OBJECT_ID, + BUCKET_FIVE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + KEY_FIFTEEN_SIZE, + getBucketLayout(), + ecType, + epochMillis2, true); + + writeKeyToOm(reconOMMetadataManager, + FILE_SIXTEEN, + BUCKET_FIVE, + VOL_TWO, + FILE_SIXTEEN, + KEY_SIXTEEN_OBJECT_ID, + DIR_EIGHT_OBJECT_ID, + BUCKET_FIVE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + KEY_SIXTEEN_SIZE, + getBucketLayout(), + ratisThree, + epochMillis1, true); + writeKeyToOm(reconOMMetadataManager, + FILE_SEVENTEEN, + BUCKET_FIVE, + VOL_TWO, + FILE_SEVENTEEN, + KEY_SEVENTEEN_OBJECT_ID, + DIR_EIGHT_OBJECT_ID, + BUCKET_FIVE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + KEY_SEVENTEEN_SIZE, + getBucketLayout(), + ecType, + epochMillis2, true); } /** @@ -895,6 +1053,14 @@ private static OMMetadataManager initializeNewOmMetadataManager( .setBucketLayout(getBucketLayout()) .build(); + OmBucketInfo bucketInfo5 = OmBucketInfo.newBuilder() + .setVolumeName(VOL_TWO) + .setBucketName(BUCKET_FIVE) + .setObjectID(BUCKET_FIVE_OBJECT_ID) + .setQuotaInBytes(BUCKET_FIVE_QUOTA) + .setBucketLayout(getBucketLayout()) + .build(); + String bucketKey = omMetadataManager.getBucketKey( bucketInfo.getVolumeName(), bucketInfo.getBucketName()); String bucketKey2 = omMetadataManager.getBucketKey( @@ -903,11 +1069,14 @@ private static OMMetadataManager initializeNewOmMetadataManager( bucketInfo3.getVolumeName(), bucketInfo3.getBucketName()); String bucketKey4 = omMetadataManager.getBucketKey( bucketInfo4.getVolumeName(), bucketInfo4.getBucketName()); + String bucketKey5 = omMetadataManager.getBucketKey( + bucketInfo5.getVolumeName(), bucketInfo5.getBucketName()); 
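+    // bucket5 (under vol2) backs the new listKeys filter tests; register it alongside the rest.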
omMetadataManager.getBucketTable().put(bucketKey, bucketInfo); omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2); omMetadataManager.getBucketTable().put(bucketKey3, bucketInfo3); omMetadataManager.getBucketTable().put(bucketKey4, bucketInfo4); + omMetadataManager.getBucketTable().put(bucketKey5, bucketInfo5); return omMetadataManager; } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java index 8d8299aefc1..722a9963d49 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java @@ -19,13 +19,17 @@ package org.apache.hadoop.ozone.recon.api; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.ozone.om.helpers.QuotaUtil.getReplicatedSize; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.StorageType; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerManager; @@ -45,6 +49,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.recon.ReconConstants; import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler; import org.apache.hadoop.ozone.recon.api.types.BucketObjectDBInfo; @@ -81,6 +86,7 @@ import java.util.ArrayList; import java.util.Set; import java.util.HashSet; +import java.util.TimeZone; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertArrayEquals; @@ -146,11 +152,14 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { private static final String BUCKET_TWO = "bucket2"; private static final String BUCKET_THREE = "bucket3"; private static final String BUCKET_FOUR = "bucket4"; + private static final String BUCKET_FIVE = "bucket5"; private static final String KEY_ONE = "file1"; private static final String KEY_TWO = "////file2"; private static final String KEY_THREE = "file3///"; private static final String KEY_FOUR = "file4"; private static final String KEY_FIVE = "_//////"; + private static final String KEY_SIX = "file6"; + private static final String KEY_SEVEN = "file7"; private static final String KEY_EIGHT = "file8"; private static final String KEY_NINE = "//////"; private static final String KEY_TEN = "///__file10"; @@ -164,11 +173,14 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { private static final long BUCKET_TWO_OBJECT_ID = 2L; private static final long BUCKET_THREE_OBJECT_ID = 15L; private static final long BUCKET_FOUR_OBJECT_ID = 16L; + private 
static final long BUCKET_FIVE_OBJECT_ID = 7L; private static final long KEY_ONE_OBJECT_ID = 3L; private static final long KEY_TWO_OBJECT_ID = 5L; private static final long KEY_THREE_OBJECT_ID = 8L; private static final long KEY_FOUR_OBJECT_ID = 6L; private static final long KEY_FIVE_OBJECT_ID = 9L; + private static final long KEY_SIX_OBJECT_ID = 10L; + private static final long KEY_SEVEN_OBJECT_ID = 11L; private static final long KEY_EIGHT_OBJECT_ID = 17L; private static final long KEY_NINE_OBJECT_ID = 19L; private static final long KEY_TEN_OBJECT_ID = 20L; @@ -205,6 +217,8 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { private static final long FILE_THREE_SIZE = 4 * OzoneConsts.KB + 1; // bin 3 private static final long FILE_FOUR_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 private static final long FILE_FIVE_SIZE = 100L; // bin 0 + private static final long FILE_SIX_SIZE = 100L; // bin 0 + private static final long FILE_SEVEN_SIZE = 100L; // bin 0 private static final long FILE_EIGHT_SIZE = OzoneConsts.KB + 1; // bin 1 private static final long FILE_NINE_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 private static final long FILE_TEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 @@ -226,6 +240,13 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { getReplicatedSize(FILE_FIVE_SIZE, StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE6_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_SIX_SIZE, + StandaloneReplicationConfig.getInstance(THREE)); + private static final long FILE7_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_SEVEN_SIZE, + StandaloneReplicationConfig.getInstance(THREE)); + private static final long FILE8_SIZE_WITH_REPLICA = getReplicatedSize(FILE_EIGHT_SIZE, StandaloneReplicationConfig.getInstance(ONE)); @@ -286,6 +307,7 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { private static final long BUCKET_TWO_QUOTA = OzoneConsts.MB; private static final long BUCKET_THREE_QUOTA = OzoneConsts.MB; private static final long BUCKET_FOUR_QUOTA = OzoneConsts.MB; + private static final long BUCKET_FIVE_QUOTA = OzoneConsts.MB; // mock client's path requests private static final String TEST_USER = "TestUser"; @@ -300,6 +322,8 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE; private static final String BUCKET_FOUR_PATH = ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_FOUR; + private static final String BUCKET_FIVE_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_FIVE; private static final String KEY_ONE_PATH = ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_ONE; private static final String KEY_TWO_PATH = @@ -310,6 +334,10 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FOUR; private static final String KEY_FIVE_PATH = ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FIVE; + private static final String KEY_SIX_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_FIVE + ROOT_PATH + KEY_SIX; + private static final String KEY_SEVEN_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_FIVE + ROOT_PATH + KEY_SEVEN; private static final String KEY_EIGHT_PATH = ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE + ROOT_PATH + KEY_EIGHT; private static final String KEY_NINE_PATH = @@ -342,11 +370,23 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { private static final long BUCKET_TWO_DATA_SIZE = FILE_FOUR_SIZE + FILE_FIVE_SIZE; + private static final long BUCKET_FIVE_DATA_SIZE = + FILE_SIX_SIZE + FILE_SEVEN_SIZE; + private static final long 
BUCKET_THREE_DATA_SIZE = FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE; private static final long BUCKET_FOUR_DATA_SIZE = FILE_ELEVEN_SIZE; + private static int chunkSize = 1024 * 1024; + + private ReplicationConfig ratisThree = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE); + private ReplicationConfig ecType = new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, chunkSize); + private long epochMillis1 = + ReconUtils.convertToEpochMillis("04-04-2024 12:30:00", "MM-dd-yyyy HH:mm:ss", TimeZone.getDefault()); + private long epochMillis2 = + ReconUtils.convertToEpochMillis("04-05-2024 12:30:00", "MM-dd-yyyy HH:mm:ss", TimeZone.getDefault()); @BeforeEach public void setUp() throws Exception { @@ -573,7 +613,7 @@ public void testDiskUsageVolume() throws Exception { Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_PATH, false, false); DUResponse duVolRes = (DUResponse) volResponse.getEntity(); - assertEquals(2, duVolRes.getCount()); + assertEquals(3, duVolRes.getCount()); List duData = duVolRes.getDuData(); // sort based on subpath Collections.sort(duData, @@ -904,6 +944,84 @@ public void testNormalizePathUptoBucket() { OmUtils.normalizePathUptoBucket("volume/bucket/key$%#1/./////////key$%#2")); } + @Test + public void testListKeysBucketFive() throws Exception { + // filter list keys under bucketFive based on RATIS ReplicationConfig and key creation date + // creationDate filter passed 1 minute above of KEY6 creation date, so listKeys API will return + // ZERO keys, as none of the RATIS keys got created after creationDate filter value. + Response bucketResponse = nsSummaryEndpoint.listKeysWithDu("RATIS", + "04-04-2024 12:31:00", 0, BUCKET_FIVE_PATH, 10, false); + DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity(); + // There are no sub-paths under this OBS bucket. + assertEquals(0, duBucketResponse.getCount()); + + // creationDate filter and keySize filter both are empty, so listKeys API should return both KEY6 and KEY7 keys, + // but replication type RATIS filter will filter out KEY7 as only 1 RATIS key KEY6 got created. + bucketResponse = nsSummaryEndpoint.listKeysWithDu("RATIS", + null, 0, BUCKET_FIVE_PATH, 10, false); + duBucketResponse = (DUResponse) bucketResponse.getEntity(); + // There are no sub-paths under this OBS bucket. + assertEquals(1, duBucketResponse.getCount()); + assertEquals(KEY_SIX, duBucketResponse.getDuData().get(0).getSubpath()); + + // creationDate filter passed same as KEY6 creation date, so listKeys API will return + // KEY6 key only, as only 1 RATIS key KEY6 created at "04-04-2024 12:30:00". + bucketResponse = nsSummaryEndpoint.listKeysWithDu("RATIS", + "04-04-2024 12:30:00", 0, BUCKET_FIVE_PATH, 10, false); + duBucketResponse = (DUResponse) bucketResponse.getEntity(); + // There are no sub-paths under this OBS bucket. + assertEquals(1, duBucketResponse.getCount()); + assertEquals(KEY_SIX, duBucketResponse.getDuData().get(0).getSubpath()); + + // creationDate filter passed same as KEY6 and KEY7 creation date, but replicationType filter is EC, + // so listKeys API will return only KEY7, as only one EC key got created at or after creationDate filter value. + bucketResponse = nsSummaryEndpoint.listKeysWithDu("EC", + "04-04-2024 12:30:00", 0, BUCKET_FIVE_PATH, 10, false); + duBucketResponse = (DUResponse) bucketResponse.getEntity(); + // There are no sub-paths under this OBS bucket. 
+ assertEquals(1, duBucketResponse.getCount()); + assertEquals(KEY_SEVEN, duBucketResponse.getDuData().get(0).getSubpath()); + + // creationDate filter passed same as KEY7 creation date, but replicationType filter is EC, + // so listKeys API will return only KEY7, as only one EC key got created at or after creationDate filter value. + bucketResponse = nsSummaryEndpoint.listKeysWithDu("EC", + "04-05-2024 12:30:00", 0, BUCKET_FIVE_PATH, 10, false); + duBucketResponse = (DUResponse) bucketResponse.getEntity(); + // There are no sub-paths under this OBS bucket. + assertEquals(1, duBucketResponse.getCount()); + assertEquals(KEY_SEVEN, duBucketResponse.getDuData().get(0).getSubpath()); + + // creationDate filter passed same as KEY7 creation date, but replicationType filter is RATIS, + // so listKeys API will return ZERO keys, as no RATIS key got created at or after creationDate filter value. + bucketResponse = nsSummaryEndpoint.listKeysWithDu("RATIS", + "04-05-2024 12:30:00", 0, BUCKET_FIVE_PATH, 10, false); + duBucketResponse = (DUResponse) bucketResponse.getEntity(); + // There are no sub-paths under this OBS bucket. + assertEquals(0, duBucketResponse.getCount()); + + // creationDate filter passed same as KEY6 creation date, and replicationType filter is RATIS, + // so listKeys API will return only KEY6, as only one RATIS key got created at or after creationDate filter value, + // but since keySize filter value is 110 bytes and all RATIS keys created are of size 100 bytes, so KEY6 will be + // filtered out and API will return ZERO keys. + bucketResponse = nsSummaryEndpoint.listKeysWithDu("RATIS", + "04-04-2024 12:30:00", 110, BUCKET_FIVE_PATH, 10, false); + duBucketResponse = (DUResponse) bucketResponse.getEntity(); + // There are no sub-paths under this OBS bucket. + assertEquals(0, duBucketResponse.getCount()); + + // creationDate filter passed same as KEY6 creation date, and replicationType filter is EC, + // so listKeys API will return only KEY7, as only one EC key got created at or after creationDate filter value, + // but since keySize filter value is 110 bytes and all EC keys created are of size 100 bytes, so KEY7 will be + // filtered out and API will return ZERO keys. + bucketResponse = nsSummaryEndpoint.listKeysWithDu("EC", + "04-04-2024 12:30:00", 110, BUCKET_FIVE_PATH, 10, false); + duBucketResponse = (DUResponse) bucketResponse.getEntity(); + // There are no sub-paths under this OBS bucket. + assertEquals(0, duBucketResponse.getCount()); + + assertEquals(BUCKET_FIVE_DATA_SIZE, duBucketResponse.getSize()); + } + /** * Testing the following case. 
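Note on the filter semantics exercised by the assertions above: listKeysWithDu first builds the unfiltered DU list for the requested startPrefix and then narrows it by replication type, creation time, and key size. A minimal sketch of that composition follows (illustrative only: the getCreationTime() and getSize() accessors on DUResponse.DiskUsage are assumed here for the sake of the example; this patch itself only shows getReplicationType()):

    // Hedged sketch of the listKeys filter chain; not the literal endpoint code.
    // replicationType, creationDate and keySize are the query parameters, and
    // keyListWithDu is duResponse.getDuData() for the requested startPrefix.
    Predicate<DUResponse.DiskUsage> byType =
        du -> StringUtils.isEmpty(replicationType)
            || replicationType.equals(du.getReplicationType());
    Predicate<DUResponse.DiskUsage> byCreationTime =
        du -> StringUtils.isEmpty(creationDate)
            || du.getCreationTime() >= ReconUtils.convertToEpochMillis(   // getCreationTime() assumed
                creationDate, "MM-dd-yyyy HH:mm:ss", TimeZone.getDefault());
    Predicate<DUResponse.DiskUsage> bySize = du -> du.getSize() > keySize; // getSize() assumed
    List<DUResponse.DiskUsage> filteredKeyList = keyListWithDu.stream()
        .filter(byType.and(byCreationTime).and(bySize))
        .collect(Collectors.toList());

Under this reading, a key passes only when every filter accepts it, which is why the keySize=110 cases below return zero records even though the type and creation-time filters match.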
@@ -929,7 +1047,6 @@ public void testNormalizePathUptoBucket() { */ @SuppressWarnings("checkstyle:MethodLength") private void populateOMDB() throws Exception { - // write all keys writeKeyToOm(reconOMMetadataManager, KEY_ONE, @@ -986,6 +1103,32 @@ private void populateOMDB() throws Exception { VOL_OBJECT_ID, FILE_FIVE_SIZE, getOBSBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_SIX, + BUCKET_FIVE, + VOL, + KEY_SIX, + KEY_SIX_OBJECT_ID, + BUCKET_FIVE_OBJECT_ID, + BUCKET_FIVE_OBJECT_ID, + VOL_OBJECT_ID, + FILE_SIX_SIZE, + getOBSBucketLayout(), + ratisThree, + epochMillis1, true); + writeKeyToOm(reconOMMetadataManager, + KEY_SEVEN, + BUCKET_FIVE, + VOL, + KEY_SEVEN, + KEY_SEVEN_OBJECT_ID, + BUCKET_FIVE_OBJECT_ID, + BUCKET_FIVE_OBJECT_ID, + VOL_OBJECT_ID, + FILE_SEVEN_SIZE, + getOBSBucketLayout(), + ecType, + epochMillis2, true); writeKeyToOm(reconOMMetadataManager, KEY_EIGHT, @@ -1104,6 +1247,14 @@ private static OMMetadataManager initializeNewOmMetadataManager( .setBucketLayout(getLegacyBucketLayout()) .build(); + OmBucketInfo bucketInfo5 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_FIVE) + .setObjectID(BUCKET_FIVE_OBJECT_ID) + .setQuotaInBytes(BUCKET_FIVE_QUOTA) + .setBucketLayout(getLegacyBucketLayout()) + .build(); + String bucketKey = omMetadataManager.getBucketKey( bucketInfo.getVolumeName(), bucketInfo.getBucketName()); String bucketKey2 = omMetadataManager.getBucketKey( @@ -1112,11 +1263,14 @@ private static OMMetadataManager initializeNewOmMetadataManager( bucketInfo3.getVolumeName(), bucketInfo3.getBucketName()); String bucketKey4 = omMetadataManager.getBucketKey( bucketInfo4.getVolumeName(), bucketInfo4.getBucketName()); + String bucketKey5 = omMetadataManager.getBucketKey( + bucketInfo5.getVolumeName(), bucketInfo5.getBucketName()); omMetadataManager.getBucketTable().put(bucketKey, bucketInfo); omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2); omMetadataManager.getBucketTable().put(bucketKey3, bucketInfo3); omMetadataManager.getBucketTable().put(bucketKey4, bucketInfo4); + omMetadataManager.getBucketTable().put(bucketKey5, bucketInfo5); return omMetadataManager; } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java index fbddd50ee4c..f0af066c46f 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java @@ -114,9 +114,9 @@ public void testInitNSSummaryTable() throws IOException { private void putThreeNSMetadata() throws IOException { HashMap hmap = new HashMap<>(); - hmap.put(1L, new NSSummary(1, 2, testBucket, TEST_CHILD_DIR, "dir1")); - hmap.put(2L, new NSSummary(3, 4, testBucket, TEST_CHILD_DIR, "dir2")); - hmap.put(3L, new NSSummary(5, 6, testBucket, TEST_CHILD_DIR, "dir3")); + hmap.put(1L, new NSSummary(1, 2, testBucket, TEST_CHILD_DIR, "dir1", -1)); + hmap.put(2L, new NSSummary(3, 4, testBucket, TEST_CHILD_DIR, "dir2", -1)); + hmap.put(3L, new NSSummary(5, 6, testBucket, TEST_CHILD_DIR, "dir3", -1)); RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); for (Map.Entry entry: hmap.entrySet()) { reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation, From 8894d388fad653ae5c68a31b72b85ef526ace41b Mon Sep 17 00:00:00 2001 From: 
deveshsingh Date: Wed, 10 Apr 2024 23:32:08 +0530 Subject: [PATCH 02/11] HDDS-10634. Recon - listKeys API for listing of OBS, FSO and Legacy bucket keys with filters. --- .../ozone/recon/api/NSSummaryEndpoint.java | 2 +- .../api/TestNSSummaryEndpointWithFSO.java | 64 ++++++++++++------- ...TestNSSummaryEndpointWithOBSAndLegacy.java | 61 ++++++++---------- .../ozone/recon/common/CommonUtils.java | 6 +- 4 files changed, 71 insertions(+), 62 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java index d9d613993b7..a4fb67f218a 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java @@ -190,7 +190,7 @@ public Response getDiskUsage(@QueryParam("path") String path, @GET @Path("/listKeys") @SuppressWarnings("methodlength") - public Response listKeysWithDu(@DefaultValue("RATIS") @QueryParam("replicationType") String replicationType, + public Response listKeysWithDu(@QueryParam("replicationType") String replicationType, @QueryParam("creationDate") String creationDate, @DefaultValue(DEFAULT_KEY_SIZE) @QueryParam("keySize") long keySize, @DefaultValue(OM_KEY_PREFIX) @QueryParam("startPrefix") String startPrefix, diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java index c8fe2aebb6f..feb5246191f 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java @@ -23,7 +23,6 @@ import static org.apache.hadoop.ozone.om.helpers.QuotaUtil.getReplicatedSize; import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -126,9 +125,8 @@ public class TestNSSummaryEndpointWithFSO { private static int chunkSize = 1024 * 1024; - private ReplicationConfig ratisThree = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); - private ReplicationConfig ecType = new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, chunkSize); + private ReplicationConfig ratisOne = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE); private long epochMillis1 = ReconUtils.convertToEpochMillis("04-04-2024 12:30:00", "MM-dd-yyyy HH:mm:ss", TimeZone.getDefault()); private long epochMillis2 = @@ -160,12 +158,6 @@ public class TestNSSummaryEndpointWithFSO { private static final String KEY_NINE = "dir5/file9"; private static final String KEY_TEN = "dir5/file10"; private static final String KEY_ELEVEN = "file11"; - private static final String KEY_TWELVE = "dir6/file12"; - private static final String KEY_THIRTEEN = "dir6/file13"; - private static final String KEY_FOURTEEN = "dir6/dir7/file14"; - private static final String KEY_FIFTEEN = "dir6/dir7/file15"; - private static final String KEY_SIXTEEN = "dir6/dir7/dir8/file16"; - private static final String KEY_SEVENTEEN = "dir6/dir7/dir8/file17";
private static final String MULTI_BLOCK_KEY = "dir1/file7"; private static final String MULTI_BLOCK_FILE = "file7"; @@ -309,6 +301,24 @@ public class TestNSSummaryEndpointWithFSO { private static final long FILE11_SIZE_WITH_REPLICA = getReplicatedSize(KEY_ELEVEN_SIZE, StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE12_SIZE_WITH_REPLICA = + getReplicatedSize(KEY_TWELVE_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE13_SIZE_WITH_REPLICA = + getReplicatedSize(KEY_THIRTEEN_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE14_SIZE_WITH_REPLICA = + getReplicatedSize(KEY_FOURTEEN_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE15_SIZE_WITH_REPLICA = + getReplicatedSize(KEY_FIFTEEN_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE16_SIZE_WITH_REPLICA = + getReplicatedSize(KEY_SIXTEEN_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE17_SIZE_WITH_REPLICA = + getReplicatedSize(KEY_SEVENTEEN_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); private static final long MULTI_BLOCK_KEY_SIZE_WITH_REPLICA = FILE7_SIZE_WITH_REPLICA; private static final long @@ -323,7 +333,13 @@ public class TestNSSummaryEndpointWithFSO { + FILE8_SIZE_WITH_REPLICA + FILE9_SIZE_WITH_REPLICA + FILE10_SIZE_WITH_REPLICA - + FILE11_SIZE_WITH_REPLICA; + + FILE11_SIZE_WITH_REPLICA + + FILE12_SIZE_WITH_REPLICA + + FILE13_SIZE_WITH_REPLICA + + FILE14_SIZE_WITH_REPLICA + + FILE15_SIZE_WITH_REPLICA + + FILE16_SIZE_WITH_REPLICA + + FILE17_SIZE_WITH_REPLICA; private static final long MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL @@ -388,14 +404,16 @@ public class TestNSSummaryEndpointWithFSO { private static final String INVALID_PATH = "/vol/path/not/found"; // some expected answers - private static final long ROOT_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE + - KEY_THREE_SIZE + KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE + - KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + KEY_ELEVEN_SIZE; + private static final long ROOT_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE + KEY_THREE_SIZE + KEY_FOUR_SIZE + + KEY_FIVE_SIZE + KEY_SIX_SIZE + KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + KEY_ELEVEN_SIZE + + KEY_TWELVE_SIZE + KEY_THIRTEEN_SIZE + KEY_FOURTEEN_SIZE + KEY_FIFTEEN_SIZE + KEY_SIXTEEN_SIZE + + KEY_SEVENTEEN_SIZE; private static final long VOL_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE + KEY_THREE_SIZE + KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE; private static final long VOL_TWO_DATA_SIZE = - KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + KEY_ELEVEN_SIZE; + KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + KEY_ELEVEN_SIZE + KEY_TWELVE_SIZE + KEY_THIRTEEN_SIZE + + KEY_FOURTEEN_SIZE + KEY_FIFTEEN_SIZE + KEY_SIXTEEN_SIZE + KEY_SEVENTEEN_SIZE; private static final long BUCKET_ONE_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE + KEY_THREE_SIZE + KEY_SIX_SIZE; @@ -729,7 +747,7 @@ public void testListKeysBucketFive() throws Exception { Response bucketResponse = nsSummaryEndpoint.listKeysWithDu("RATIS", "", 0, BUCKET_FIVE_PATH, 1000, true); DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity(); - assertEquals(3, duBucketResponse.getCount()); + assertEquals(6, duBucketResponse.getCount()); DUResponse.DiskUsage duDir1 = duBucketResponse.getDuData().get(0); assertEquals(DIR_SIX_PATH.substring(1) + OM_KEY_PREFIX + FILE_TWELVE, duDir1.getSubpath()); assertEquals("RATIS", duDir1.getReplicationType()); @@ -737,7 +755,7 @@ 
public void testListKeysBucketFive() throws Exception { @Test public void testFileSizeDist() throws Exception { - checkFileSizeDist(ROOT_PATH, 2, 3, 4, 1); + checkFileSizeDist(ROOT_PATH, 2, 5, 8, 1); checkFileSizeDist(VOL_PATH, 2, 1, 2, 1); checkFileSizeDist(BUCKET_ONE_PATH, 1, 1, 1, 1); checkFileSizeDist(DIR_ONE_PATH, 0, 1, 1, 1); @@ -913,7 +931,7 @@ private void populateOMDB() throws Exception { VOL_TWO_OBJECT_ID, KEY_TWELVE_SIZE, getBucketLayout(), - ratisThree, + ratisOne, epochMillis1, true); writeKeyToOm(reconOMMetadataManager, FILE_THIRTEEN, @@ -926,7 +944,7 @@ private void populateOMDB() throws Exception { VOL_TWO_OBJECT_ID, KEY_THIRTEEN_SIZE, getBucketLayout(), - ecType, + ratisOne, epochMillis2, true); writeKeyToOm(reconOMMetadataManager, @@ -940,7 +958,7 @@ private void populateOMDB() throws Exception { VOL_TWO_OBJECT_ID, KEY_FOURTEEN_SIZE, getBucketLayout(), - ratisThree, + ratisOne, epochMillis1, true); writeKeyToOm(reconOMMetadataManager, FILE_FIFTEEN, @@ -953,7 +971,7 @@ private void populateOMDB() throws Exception { VOL_TWO_OBJECT_ID, KEY_FIFTEEN_SIZE, getBucketLayout(), - ecType, + ratisOne, epochMillis2, true); writeKeyToOm(reconOMMetadataManager, @@ -967,7 +985,7 @@ private void populateOMDB() throws Exception { VOL_TWO_OBJECT_ID, KEY_SIXTEEN_SIZE, getBucketLayout(), - ratisThree, + ratisOne, epochMillis1, true); writeKeyToOm(reconOMMetadataManager, FILE_SEVENTEEN, @@ -980,7 +998,7 @@ private void populateOMDB() throws Exception { VOL_TWO_OBJECT_ID, KEY_SEVENTEEN_SIZE, getBucketLayout(), - ecType, + ratisOne, epochMillis2, true); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java index 722a9963d49..b377c038998 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java @@ -269,6 +269,8 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { + FILE3_SIZE_WITH_REPLICA + FILE4_SIZE_WITH_REPLICA + FILE5_SIZE_WITH_REPLICA + + FILE6_SIZE_WITH_REPLICA + + FILE7_SIZE_WITH_REPLICA + FILE8_SIZE_WITH_REPLICA + FILE9_SIZE_WITH_REPLICA + FILE10_SIZE_WITH_REPLICA @@ -280,7 +282,9 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { + FILE2_SIZE_WITH_REPLICA + FILE3_SIZE_WITH_REPLICA + FILE4_SIZE_WITH_REPLICA - + FILE5_SIZE_WITH_REPLICA; + + FILE5_SIZE_WITH_REPLICA + + FILE6_SIZE_WITH_REPLICA + + FILE7_SIZE_WITH_REPLICA; private static final long MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET1 @@ -354,11 +358,10 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { // some expected answers private static final long ROOT_DATA_SIZE = - FILE_ONE_SIZE + FILE_TWO_SIZE + FILE_THREE_SIZE + FILE_FOUR_SIZE + - FILE_FIVE_SIZE + FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE + - FILE_ELEVEN_SIZE; + FILE_ONE_SIZE + FILE_TWO_SIZE + FILE_THREE_SIZE + FILE_FOUR_SIZE + FILE_FIVE_SIZE + FILE_SIX_SIZE + + FILE_SEVEN_SIZE + FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE + FILE_ELEVEN_SIZE; private static final long VOL_DATA_SIZE = FILE_ONE_SIZE + FILE_TWO_SIZE + - FILE_THREE_SIZE + FILE_FOUR_SIZE + FILE_FIVE_SIZE; + FILE_THREE_SIZE + FILE_FOUR_SIZE + FILE_FIVE_SIZE + FILE_SIX_SIZE + FILE_SEVEN_SIZE; private static final long VOL_TWO_DATA_SIZE = FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE + FILE_ELEVEN_SIZE; @@ -380,8 +383,7 @@ 
public class TestNSSummaryEndpointWithOBSAndLegacy { private static int chunkSize = 1024 * 1024; - private ReplicationConfig ratisThree = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); + private ReplicationConfig ratisOne = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, ONE); private ReplicationConfig ecType = new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, chunkSize); private long epochMillis1 = ReconUtils.convertToEpochMillis("04-04-2024 12:30:00", "MM-dd-yyyy HH:mm:ss", TimeZone.getDefault()); @@ -449,8 +451,8 @@ public void testGetBasicInfoRoot() throws Exception { (NamespaceSummaryResponse) rootResponse.getEntity(); assertEquals(EntityType.ROOT, rootResponseObj.getEntityType()); assertEquals(2, rootResponseObj.getCountStats().getNumVolume()); - assertEquals(4, rootResponseObj.getCountStats().getNumBucket()); - assertEquals(9, rootResponseObj.getCountStats().getNumTotalKey()); + assertEquals(5, rootResponseObj.getCountStats().getNumBucket()); + assertEquals(11, rootResponseObj.getCountStats().getNumTotalKey()); } @Test @@ -461,8 +463,8 @@ public void testGetBasicInfoVol() throws Exception { (NamespaceSummaryResponse) volResponse.getEntity(); assertEquals(EntityType.VOLUME, volResponseObj.getEntityType()); - assertEquals(2, volResponseObj.getCountStats().getNumBucket()); - assertEquals(5, volResponseObj.getCountStats().getNumTotalKey()); + assertEquals(3, volResponseObj.getCountStats().getNumBucket()); + assertEquals(7, volResponseObj.getCountStats().getNumTotalKey()); assertEquals(TEST_USER, ((VolumeObjectDBInfo) volResponseObj. getObjectDBInfo()).getAdmin()); assertEquals(TEST_USER, ((VolumeObjectDBInfo) volResponseObj. @@ -897,8 +899,8 @@ public void testQuotaUsage() throws Exception { @Test public void testFileSizeDist() throws Exception { - checkFileSizeDist(ROOT_PATH, 2, 3, 3, 1); - checkFileSizeDist(VOL_PATH, 2, 1, 1, 1); + checkFileSizeDist(ROOT_PATH, 4, 3, 3, 1); + checkFileSizeDist(VOL_PATH, 4, 1, 1, 1); checkFileSizeDist(BUCKET_ONE_PATH, 1, 1, 0, 1); } @@ -948,56 +950,45 @@ public void testNormalizePathUptoBucket() { public void testListKeysBucketFive() throws Exception { // filter list keys under bucketFive based on RATIS ReplicationConfig and key creation date // creationDate filter passed 1 minute above of KEY6 creation date, so listKeys API will return - // ZERO keys, as none of the RATIS keys got created after creationDate filter value. + // one key, as one RATIS key got created after the creationDate filter value. Response bucketResponse = nsSummaryEndpoint.listKeysWithDu("RATIS", "04-04-2024 12:31:00", 0, BUCKET_FIVE_PATH, 10, false); DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity(); // There are no sub-paths under this OBS bucket. - assertEquals(0, duBucketResponse.getCount()); + assertEquals(1, duBucketResponse.getCount()); // creationDate filter and keySize filter both are empty, so listKeys API should return both KEY6 and KEY7 keys, - // but replication type RATIS filter will filter out KEY7 as only 1 RATIS key KEY6 got created. bucketResponse = nsSummaryEndpoint.listKeysWithDu("RATIS", null, 0, BUCKET_FIVE_PATH, 10, false); duBucketResponse = (DUResponse) bucketResponse.getEntity(); // There are no sub-paths under this OBS bucket.
- assertEquals(1, duBucketResponse.getCount()); + assertEquals(2, duBucketResponse.getCount()); assertEquals(KEY_SIX, duBucketResponse.getDuData().get(0).getSubpath()); // creationDate filter passed same as KEY6 creation date, so listKeys API will return - // KEY6 key only, as only 1 RATIS key KEY6 created at "04-04-2024 12:30:00". + // KEY6 and KEY7, as 2 RATIS keys got created at or after "04-04-2024 12:30:00". bucketResponse = nsSummaryEndpoint.listKeysWithDu("RATIS", "04-04-2024 12:30:00", 0, BUCKET_FIVE_PATH, 10, false); duBucketResponse = (DUResponse) bucketResponse.getEntity(); // There are no sub-paths under this OBS bucket. - assertEquals(1, duBucketResponse.getCount()); + assertEquals(2, duBucketResponse.getCount()); assertEquals(KEY_SIX, duBucketResponse.getDuData().get(0).getSubpath()); // creationDate filter passed same as KEY6 and KEY7 creation date, but replicationType filter is EC, - // so listKeys API will return only KEY7, as only one EC key got created at or after creationDate filter value. + // so listKeys API will return zero keys, because no EC key got created at or after creationDate filter value. bucketResponse = nsSummaryEndpoint.listKeysWithDu("EC", "04-04-2024 12:30:00", 0, BUCKET_FIVE_PATH, 10, false); duBucketResponse = (DUResponse) bucketResponse.getEntity(); // There are no sub-paths under this OBS bucket. - assertEquals(1, duBucketResponse.getCount()); - assertEquals(KEY_SEVEN, duBucketResponse.getDuData().get(0).getSubpath()); - - // creationDate filter passed same as KEY7 creation date, but replicationType filter is EC, - // so listKeys API will return only KEY7, as only one EC key got created at or after creationDate filter value. - bucketResponse = nsSummaryEndpoint.listKeysWithDu("EC", - "04-05-2024 12:30:00", 0, BUCKET_FIVE_PATH, 10, false); - duBucketResponse = (DUResponse) bucketResponse.getEntity(); - // There are no sub-paths under this OBS bucket. - assertEquals(1, duBucketResponse.getCount()); - assertEquals(KEY_SEVEN, duBucketResponse.getDuData().get(0).getSubpath()); + assertEquals(0, duBucketResponse.getCount()); // creationDate filter passed same as KEY7 creation date, but replicationType filter is RATIS, - // so listKeys API will return ZERO keys, as no RATIS key got created at or after creationDate filter value. + // so listKeys API will return one key, as only 1 RATIS key got created at or after the creationDate filter value. bucketResponse = nsSummaryEndpoint.listKeysWithDu("RATIS", "04-05-2024 12:30:00", 0, BUCKET_FIVE_PATH, 10, false); duBucketResponse = (DUResponse) bucketResponse.getEntity(); // There are no sub-paths under this OBS bucket.
- assertEquals(0, duBucketResponse.getCount()); + assertEquals(1, duBucketResponse.getCount()); // creationDate filter passed same as KEY6 creation date, and replicationType filter is RATIS, // so listKeys API will return only KEY6, as only one RATIS key got created at or after creationDate filter value, @@ -1114,7 +1105,7 @@ private void populateOMDB() throws Exception { VOL_OBJECT_ID, FILE_SIX_SIZE, getOBSBucketLayout(), - ratisThree, + ratisOne, epochMillis1, true); writeKeyToOm(reconOMMetadataManager, KEY_SEVEN, @@ -1127,7 +1118,7 @@ private void populateOMDB() throws Exception { VOL_OBJECT_ID, FILE_SEVEN_SIZE, getOBSBucketLayout(), - ecType, + ratisOne, epochMillis2, true); writeKeyToOm(reconOMMetadataManager, diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java index 8b35bfdd4d2..dc2e26861c3 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java @@ -91,9 +91,9 @@ public void testNSSummaryBasicInfoRoot( (NamespaceSummaryResponse) rootResponse.getEntity(); assertEquals(EntityType.ROOT, rootResponseObj.getEntityType()); assertEquals(2, rootResponseObj.getCountStats().getNumVolume()); - assertEquals(4, rootResponseObj.getCountStats().getNumBucket()); - assertEquals(5, rootResponseObj.getCountStats().getNumTotalDir()); - assertEquals(10, rootResponseObj.getCountStats().getNumTotalKey()); + assertEquals(5, rootResponseObj.getCountStats().getNumBucket()); + assertEquals(8, rootResponseObj.getCountStats().getNumTotalDir()); + assertEquals(16, rootResponseObj.getCountStats().getNumTotalKey()); assertEquals("USER", rootResponseObj.getObjectDBInfo().getAcls().get(0).getType()); assertEquals("WRITE", rootResponseObj.getObjectDBInfo().getAcls().get(0) From fd97d45f88659aacd675a0b21845cf9295900604 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Thu, 11 Apr 2024 08:43:30 +0530 Subject: [PATCH 03/11] HDDS-10634. Fixed findbugs. --- .../ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java index b377c038998..cfbe6d3524b 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java @@ -384,7 +384,6 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { private static int chunkSize = 1024 * 1024; private ReplicationConfig ratisOne = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, ONE); - private ReplicationConfig ecType = new ECReplicationConfig(3, 2, ECReplicationConfig.EcCodec.RS, chunkSize); private long epochMillis1 = ReconUtils.convertToEpochMillis("04-04-2024 12:30:00", "MM-dd-yyyy HH:mm:ss", TimeZone.getDefault()); private long epochMillis2 = From 031097bed774242467438fca6241bc03a0a2c208 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Thu, 11 Apr 2024 08:47:02 +0530 Subject: [PATCH 04/11] HDDS-10634. Fixed checkstyle. 
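The ECReplicationConfig import in TestNSSummaryEndpointWithOBSAndLegacy became unused once the preceding findbugs fix dropped the ecType field, so checkstyle flags it; this change removes the leftover import.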
--- .../ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java index cfbe6d3524b..ea2aedce411 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java @@ -23,7 +23,6 @@ import static org.apache.hadoop.ozone.om.helpers.QuotaUtil.getReplicatedSize; import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; From 6166f470c4a2d145fb6a13d70c35a8792be76a04 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Mon, 15 Apr 2024 15:29:31 +0530 Subject: [PATCH 05/11] HDDS-10634. Added pagination logic. --- .../hadoop/ozone/recon/ReconConstants.java | 1 - .../ozone/recon/api/NSSummaryEndpoint.java | 12 +- .../api/handlers/BucketEntityHandler.java | 15 ++- .../recon/api/handlers/BucketHandler.java | 20 ++- .../api/handlers/DirectoryEntityHandler.java | 10 +- .../recon/api/handlers/EntityHandler.java | 7 +- .../recon/api/handlers/FSOBucketHandler.java | 33 +++-- .../recon/api/handlers/KeyEntityHandler.java | 3 +- .../api/handlers/LegacyBucketHandler.java | 27 ++-- .../recon/api/handlers/OBSBucketHandler.java | 15 ++- .../recon/api/handlers/RootEntityHandler.java | 10 +- .../api/handlers/UnknownEntityHandler.java | 3 +- .../api/handlers/VolumeEntityHandler.java | 5 +- .../ozone/recon/api/types/DUResponse.java | 27 ++++ .../hadoop/ozone/recon/api/types/Stats.java | 77 ++++++++++++ .../ozone/recon/heatmap/HeatMapUtil.java | 3 +- ...TestNSSummaryEndpointWithOBSAndLegacy.java | 115 +++++++++++++++--- 17 files changed, 314 insertions(+), 69 deletions(-) create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/Stats.java diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java index bea4718cc11..59e3dc537cc 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java @@ -40,7 +40,6 @@ private ReconConstants() { public static final String DEFAULT_OPEN_KEY_INCLUDE_FSO = "false"; public static final String DEFAULT_FETCH_COUNT = "1000"; public static final String DEFAULT_KEY_SIZE = "0"; - public static final String DEFAULT_START_VALUE = "1"; public static final String DEFAULT_BATCH_NUMBER = "1"; public static final String RECON_QUERY_BATCH_PARAM = "batchNum"; public static final String RECON_QUERY_PREVKEY = "prevKey"; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java index a4fb67f218a..73e4465251a 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java @@ -29,6 +29,7 @@ import 
org.apache.hadoop.ozone.recon.api.types.ResponseStatus; import org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse; import org.apache.hadoop.ozone.recon.api.types.EntityType; +import org.apache.hadoop.ozone.recon.api.types.Stats; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; import org.slf4j.Logger; @@ -142,7 +143,7 @@ public Response getDiskUsage(@QueryParam("path") String path, omMetadataManager, reconSCM, path); duResponse = handler.getDuResponse( - listFile, withReplica, false); + listFile, withReplica, false, new Stats(-1)); return Response.ok(duResponse).build(); } @@ -160,7 +161,7 @@ public Response getDiskUsage(@QueryParam("path") String path, * @param creationDate Filter for keys created after creationDate in "MM-dd-yyyy HH:mm:ss" string format. * @param keySize Filter for Keys greater than keySize in bytes. * @param startPrefix Filter for startPrefix path. - * @param count Filter for limited count of keys. + * @param limit Filter for limited count of keys. * @return the list of keys in below structured format: * Response For OBS Bucket keys: * ******************************************************** @@ -194,7 +195,7 @@ public Response listKeysWithDu(@QueryParam("replicationType") String replication @QueryParam("creationDate") String creationDate, @DefaultValue(DEFAULT_KEY_SIZE) @QueryParam("keySize") long keySize, @DefaultValue(OM_KEY_PREFIX) @QueryParam("startPrefix") String startPrefix, - @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam("count") long count, + @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam("count") long limit, @DefaultValue("false") @QueryParam("recursive") boolean recursive) throws IOException { @@ -210,7 +211,9 @@ public Response listKeysWithDu(@QueryParam("replicationType") String replication reconNamespaceSummaryManager, omMetadataManager, reconSCM, startPrefix); - duResponse = handler.getListKeysResponse(count, recursive); + Stats stats = new Stats(limit); + + duResponse = handler.getListKeysResponse(stats, recursive); List keyListWithDu = duResponse.getDuData(); @@ -230,7 +233,6 @@ public Response listKeysWithDu(@QueryParam("replicationType") String replication duResponse.setDuData(filteredKeyList); duResponse.setCount(filteredKeyList.size()); - return Response.ok(duResponse).build(); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java index 563cf0b7704..50dcd82635e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java @@ -28,6 +28,7 @@ import org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse; import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.api.types.ResponseStatus; +import org.apache.hadoop.ozone.recon.api.types.Stats; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; import org.slf4j.Logger; @@ -90,7 +91,7 @@ private BucketObjectDBInfo getBucketObjDbInfo(String[] names) @Override public DUResponse getDuResponse( - boolean listFile, boolean withReplica, boolean recursive) + boolean listFile, boolean withReplica, boolean recursive, Stats stats) throws IOException { 
DUResponse duResponse = new DUResponse(); duResponse.setPath(getNormalizedPath()); @@ -126,21 +127,25 @@ public DUResponse getDuResponse( long dataSize = getTotalSize(subdirObjectId); bucketDataSize += dataSize; + stats.setCurrentCount(stats.getCurrentCount() + 1); + if (withReplica) { long dirDU = getBucketHandler() - .calculateDUUnderObject(subdirObjectId, recursive, diskUsageList); + .calculateDUUnderObject(subdirObjectId, recursive, diskUsageList, stats); diskUsage.setSizeWithReplica(dirDU); bucketDataSizeWithReplica += dirDU; } diskUsage.setSize(dataSize); dirDUData.add(diskUsage); - dirDUData.addAll(diskUsageList); + if (diskUsageList.size() > 0) { + dirDUData.addAll(diskUsageList); + } } // Either listFile or withReplica is enabled, we need the directKeys info if (listFile || withReplica) { bucketDataSizeWithReplica += getBucketHandler() .handleDirectKeys(bucketObjectId, withReplica, - listFile, dirDUData, getNormalizedPath()); + listFile, dirDUData, getNormalizedPath(), stats); } if (withReplica) { duResponse.setSizeWithReplica(bucketDataSizeWithReplica); @@ -148,6 +153,8 @@ public DUResponse getDuResponse( duResponse.setCount(dirDUData.size()); duResponse.setSize(bucketDataSize); duResponse.setDuData(dirDUData); + duResponse.setTotalCount(stats.getTotalCount()); + duResponse.setLastKey(stats.getLastKey()); return duResponse; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java index 93c2a12d655..959271a97ea 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java @@ -29,6 +29,7 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.recon.api.types.DUResponse; import org.apache.hadoop.ozone.recon.api.types.EntityType; +import org.apache.hadoop.ozone.recon.api.types.Stats; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; import org.slf4j.Logger; @@ -78,7 +79,7 @@ public abstract EntityType determineKeyPath(String keyName) throws IOException; public abstract long calculateDUUnderObject(long parentId, boolean recursive, - List diskUsageList) throws IOException; + List diskUsageList, Stats stats) throws IOException; public abstract long getDirObjectId(String[] names) throws IOException; @@ -86,7 +87,7 @@ public abstract long getDirObjectId(String[] names) public abstract long handleDirectKeys(long parentId, boolean withReplica, boolean listFile, List duData, - String normalizedPath) throws IOException; + String normalizedPath, Stats stats) throws IOException; public abstract long getDirObjectId(String[] names, int cutoff) throws IOException; @@ -232,4 +233,19 @@ public static BucketHandler getBucketHandler( return getBucketHandler(reconNamespaceSummaryManager, omMetadataManager, reconSCM, bucketInfo); } + + protected static void verifyStatsAndAddDURecord(List duData, Stats stats, + Table.KeyValue kv, + DUResponse.DiskUsage diskUsage) throws IOException { + if (stats.getLimit() == -1) { + duData.add(diskUsage); + } else { + if (stats.getCurrentCount() < stats.getLimit()) { + duData.add(diskUsage); + stats.setCurrentCount(stats.getCurrentCount() + 1); + stats.setLastKey(kv.getKey()); + } + } + stats.setTotalCount(stats.getTotalCount() + 1); + } } diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java index 75001ba6e68..f5464c2228e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java @@ -29,6 +29,7 @@ import org.apache.hadoop.ozone.recon.api.types.QuotaUsageResponse; import org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse; +import org.apache.hadoop.ozone.recon.api.types.Stats; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; @@ -80,7 +81,7 @@ private ObjectDBInfo getDirectoryObjDbInfo(String[] names) @Override public DUResponse getDuResponse( - boolean listFile, boolean withReplica, boolean recursive) + boolean listFile, boolean withReplica, boolean recursive, Stats stats) throws IOException { DUResponse duResponse = new DUResponse(); duResponse.setPath(getNormalizedPath()); @@ -134,7 +135,7 @@ public DUResponse getDuResponse( if (withReplica) { long subdirDU = getBucketHandler() - .calculateDUUnderObject(subdirObjectId, recursive, diskUsageList); + .calculateDUUnderObject(subdirObjectId, recursive, diskUsageList, stats); diskUsage.setSizeWithReplica(subdirDU); dirDataSizeWithReplica += subdirDU; } @@ -149,7 +150,7 @@ public DUResponse getDuResponse( if (listFile || withReplica) { dirDataSizeWithReplica += getBucketHandler() .handleDirectKeys(dirObjectId, withReplica, - listFile, subdirDUData, getNormalizedPath()); + listFile, subdirDUData, getNormalizedPath(), stats); } if (withReplica) { @@ -158,7 +159,8 @@ public DUResponse getDuResponse( duResponse.setCount(subdirDUData.size()); duResponse.setSize(dirDataSize); duResponse.setDuData(subdirDUData); - + duResponse.setTotalCount(stats.getTotalCount()); + duResponse.setLastKey(stats.getLastKey()); return duResponse; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java index c434208f865..c3f0a0fc5ab 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java @@ -27,6 +27,7 @@ import org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse; import org.apache.hadoop.ozone.recon.api.types.EntityType; import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.api.types.Stats; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; @@ -79,11 +80,11 @@ public abstract NamespaceSummaryResponse getSummaryResponse() throws IOException; public abstract DUResponse getDuResponse( - boolean listFile, boolean withReplica, boolean recursive) + boolean listFile, boolean withReplica, boolean recursive, Stats stats) throws IOException; - public DUResponse getListKeysResponse(long count, boolean recursive) throws IOException { - return getDuResponse(true, true, recursive); + public DUResponse getListKeysResponse(Stats stats, boolean recursive) throws IOException { + return getDuResponse(true, true, recursive, stats); } public abstract 
QuotaUsageResponse getQuotaResponse() diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java index ac0a0802c37..dd8152cdeab 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java @@ -29,6 +29,7 @@ import org.apache.hadoop.ozone.recon.api.types.DUResponse; import org.apache.hadoop.ozone.recon.api.types.EntityType; import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.api.types.Stats; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; @@ -118,7 +119,8 @@ public EntityType determineKeyPath(String keyName) // FileTable's key is in the format of "volumeId/bucketId/parentId/fileName" // Make use of RocksDB's order to seek to the prefix and avoid full iteration @Override - public long calculateDUUnderObject(long parentId, boolean recursive, List diskUsageList) + public long calculateDUUnderObject(long parentId, boolean recursive, List diskUsageList, + Stats stats) throws IOException { Table keyTable = getOmMetadataManager().getFileTable(); @@ -144,9 +146,18 @@ public long calculateDUUnderObject(long parentId, boolean recursive, List subDirIds = nsSummary.getChildDir(); for (long subDirId: subDirIds) { - totalDU += calculateDUUnderObject(subDirId, recursive, diskUsageList); + totalDU += calculateDUUnderObject(subDirId, recursive, diskUsageList, stats); } return totalDU; } @@ -222,12 +233,14 @@ public static String constructFullPath(OmKeyInfo omKeyInfo, /** * This method handles disk usage of direct keys. 
- * @param parentId parent directory/bucket - * @param withReplica if withReplica is enabled, set sizeWithReplica - * for each direct key's DU - * @param listFile if listFile is enabled, append key DU as a subpath - * @param duData the current DU data + * + * @param parentId parent directory/bucket + * @param withReplica if withReplica is enabled, set sizeWithReplica + * for each direct key's DU + * @param listFile if listFile is enabled, append key DU as a subpath + * @param duData the current DU data * @param normalizedPath the normalized path request + * @param stats * @return the total DU of all direct keys * @throws IOException IOE */ @@ -235,7 +248,7 @@ public static String constructFullPath(OmKeyInfo omKeyInfo, public long handleDirectKeys(long parentId, boolean withReplica, boolean listFile, List duData, - String normalizedPath) throws IOException { + String normalizedPath, Stats stats) throws IOException { Table keyTable = getOmMetadataManager().getFileTable(); long keyDataSizeWithReplica = 0L; @@ -278,7 +291,7 @@ public long handleDirectKeys(long parentId, boolean withReplica, } // list the key as a subpath if (listFile) { - duData.add(diskUsage); + verifyStatsAndAddDURecord(duData, stats, kv, diskUsage); } } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/KeyEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/KeyEntityHandler.java index 1ee9bca3a2c..c9d8e880b9c 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/KeyEntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/KeyEntityHandler.java @@ -28,6 +28,7 @@ import org.apache.hadoop.ozone.recon.api.types.DUResponse; import org.apache.hadoop.ozone.recon.api.types.QuotaUsageResponse; import org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse; +import org.apache.hadoop.ozone.recon.api.types.Stats; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; @@ -71,7 +72,7 @@ private ObjectDBInfo getKeyDbObjectInfo(String[] names) @Override public DUResponse getDuResponse( - boolean listFile, boolean withReplica, boolean recursive) + boolean listFile, boolean withReplica, boolean recursive, Stats stats) throws IOException { DUResponse duResponse = new DUResponse(); duResponse.setPath(getNormalizedPath()); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java index 4f4144211bd..d818c9ce47c 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java @@ -29,6 +29,7 @@ import org.apache.hadoop.ozone.recon.api.types.DUResponse; import org.apache.hadoop.ozone.recon.api.types.EntityType; import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.api.types.Stats; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; import org.slf4j.Logger; @@ -112,13 +113,15 @@ public EntityType determineKeyPath(String keyName) * handling only direct keys. 
* * @param parentId - * @param recursive - * @param diskUsageList + * @param recursive Whether to add keys recursively or just immediate du records. + * @param diskUsageList List to add du records. + * @param stats Statistics related to DU records count and limit. * @return total DU of direct keys under object * @throws IOException */ @Override - public long calculateDUUnderObject(long parentId, boolean recursive, List diskUsageList) + public long calculateDUUnderObject(long parentId, boolean recursive, List diskUsageList, + Stats stats) throws IOException { Table keyTable = getKeyTable(); @@ -177,19 +180,21 @@ public long calculateDUUnderObject(long parentId, boolean recursive, List subDirIds = nsSummary.getChildDir(); for (long subDirId: subDirIds) { - totalDU += calculateDUUnderObject(subDirId, recursive, diskUsageList); + totalDU += calculateDUUnderObject(subDirId, recursive, diskUsageList, stats); } return totalDU; } /** * This method handles disk usage of direct keys. - * @param parentId parent directory/bucket - * @param withReplica if withReplica is enabled, set sizeWithReplica - * for each direct key's DU - * @param listFile if listFile is enabled, append key DU as a subpath - * @param duData the current DU data + * + * @param parentId parent directory/bucket + * @param withReplica if withReplica is enabled, set sizeWithReplica + * for each direct key's DU + * @param listFile if listFile is enabled, append key DU as a subpath + * @param duData the current DU data * @param normalizedPath the normalized path request + * @param stats * @return the total DU of all direct keys * @throws IOException IOE */ @@ -197,7 +202,7 @@ public long calculateDUUnderObject(long parentId, boolean recursive, List duData, - String normalizedPath) throws IOException { + String normalizedPath, Stats stats) throws IOException { Table keyTable = getKeyTable(); long keyDataSizeWithReplica = 0L; @@ -264,7 +269,7 @@ public long handleDirectKeys(long parentId, boolean withReplica, } // list the key as a subpath if (listFile) { - duData.add(diskUsage); + verifyStatsAndAddDURecord(duData, stats, kv, diskUsage); } } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java index 76cbc3365e4..af610370bf1 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java @@ -28,6 +28,7 @@ import org.apache.hadoop.ozone.recon.api.types.DUResponse; import org.apache.hadoop.ozone.recon.api.types.EntityType; import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.api.types.Stats; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; @@ -97,6 +98,7 @@ public EntityType determineKeyPath(String keyName) throws IOException { * keys * @param duData the current DU data * @param normalizedPath the normalized path request + * @param stats * @return the total DU of all direct keys * @throws IOException IOE */ @@ -104,7 +106,7 @@ public EntityType determineKeyPath(String keyName) throws IOException { public long handleDirectKeys(long parentId, boolean withReplica, boolean listFile, List duData, - String normalizedPath) throws IOException { + String normalizedPath, Stats stats) throws IOException { NSSummary
 nsSummary = getReconNamespaceSummaryManager()
 .getNSSummary(parentId);
@@ -156,7 +158,7 @@ public long handleDirectKeys(long parentId, boolean withReplica,
 }
 // List all the keys for the OBS bucket if requested.
 if (listFile) {
- duData.add(diskUsage);
+ verifyStatsAndAddDURecord(duData, stats, kv, diskUsage);
 }
 }
@@ -172,13 +174,15 @@ public long handleDirectKeys(long parentId, boolean withReplica,
 * all the keys in the bucket without the need to traverse directories.
 *
 * @param parentId The identifier for the parent bucket.
- * @param recursive
- * @param diskUsageList
+ * @param recursive Whether to add keys recursively or just immediate DU records.
+ * @param diskUsageList List to add DU records to.
+ * @param stats Statistics related to DU record count and limit.
 * @return The total disk usage of all keys within the specified OBS bucket.
 * @throws IOException
 */
 @Override
- public long calculateDUUnderObject(long parentId, boolean recursive, List diskUsageList)
+ public long calculateDUUnderObject(long parentId, boolean recursive, List diskUsageList,
+ Stats stats)
 throws IOException {
 // Initialize the total disk usage variable.
 long totalDU = 0L;
@@ -207,6 +211,7 @@ public long calculateDUUnderObject(long parentId, boolean recursive, List duData;
@@ -118,6 +129,22 @@ public void setKeySize(long keySize) {
 this.keySize = keySize;
 }
+ public long getTotalCount() {
+ return totalCount;
+ }
+
+ public void setTotalCount(long totalCount) {
+ this.totalCount = totalCount;
+ }
+
+ public String getLastKey() {
+ return lastKey;
+ }
+
+ public void setLastKey(String lastKey) {
+ this.lastKey = lastKey;
+ }
+
 /**
 * DU info for a path (path name, data size).
 */
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/Stats.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/Stats.java
new file mode 100644
index 00000000000..f5dd2732400
--- /dev/null
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/Stats.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.recon.api.types;
+
+/**
+ * Wrapper object for pagination statistics of the records in an API response.
+ */
+public class Stats {
+ /**
+ * Total count of the keys.
+ */
+ private long totalCount;
+
+ /** Last key sent. */
+ private String lastKey;
+
+ /**
+ * Limit on the number of records to return in the API response.
+ */
+ private long limit;
+
+ /**
+ * Counter tracking the number of records added to the API response.
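+ * Once it reaches {@code limit}, no further records should be added to the current page.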
+ */ + private long currentCount; + + public Stats(long limit) { + this.limit = limit; + } + + public long getTotalCount() { + return totalCount; + } + + public void setTotalCount(long totalCount) { + this.totalCount = totalCount; + } + + public String getLastKey() { + return lastKey; + } + + public void setLastKey(String lastKey) { + this.lastKey = lastKey; + } + + public long getLimit() { + return limit; + } + + public void setLimit(long limit) { + this.limit = limit; + } + + public long getCurrentCount() { + return currentCount; + } + + public void setCurrentCount(long currentCount) { + this.currentCount = currentCount; + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java index 57f7686263f..130c7772ebb 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java @@ -28,6 +28,7 @@ import org.apache.hadoop.ozone.recon.api.types.EntityMetaData; import org.apache.hadoop.ozone.recon.api.types.EntityReadAccessHeatMapResponse; import org.apache.hadoop.ozone.recon.api.types.ResponseStatus; +import org.apache.hadoop.ozone.recon.api.types.Stats; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; import jakarta.annotation.Nonnull; @@ -71,7 +72,7 @@ private long getEntitySize(String path) throws IOException { EntityHandler.getEntityHandler(reconNamespaceSummaryManager, omMetadataManager, reconSCM, path); if (null != entityHandler) { - DUResponse duResponse = entityHandler.getDuResponse(false, false, false); + DUResponse duResponse = entityHandler.getDuResponse(false, false, false, new Stats(-1)); if (null != duResponse && duResponse.getStatus() == ResponseStatus.OK) { return duResponse.getSize(); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java index ea2aedce411..72f8f9b1543 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java @@ -118,15 +118,21 @@ * │ ├── file2 * │ └── file3 * └── bucket2 (OBS) - * ├── file4 - * └── file5 + * │ ├── file4 + * │ └── file5 + * └── bucket5 (OBS) + * ├── file6 + * └── file7 * └── vol2 * ├── bucket3 (Legacy) * │ ├── file8 * │ ├── file9 * │ └── file10 * └── bucket4 (Legacy) - * └── file11 + * │ └── file11 + * └── bucket6 (Legacy) + * └── file12 + * └── file13 */ public class TestNSSummaryEndpointWithOBSAndLegacy { @TempDir @@ -152,6 +158,7 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { private static final String BUCKET_THREE = "bucket3"; private static final String BUCKET_FOUR = "bucket4"; private static final String BUCKET_FIVE = "bucket5"; + private static final String BUCKET_SIX = "bucket6"; private static final String KEY_ONE = "file1"; private static final String KEY_TWO = "////file2"; private static final String KEY_THREE = "file3///"; @@ -164,6 +171,8 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { private static final String KEY_TEN = "///__file10"; private static final String KEY_ELEVEN = "////file11"; private 
static final String MULTI_BLOCK_FILE = KEY_THREE; + private static final String KEY_TWELVE = "file12"; + private static final String KEY_THIRTEEN = "file13"; private static final long PARENT_OBJECT_ID_ZERO = 0L; private static final long VOL_OBJECT_ID = 0L; @@ -173,6 +182,7 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { private static final long BUCKET_THREE_OBJECT_ID = 15L; private static final long BUCKET_FOUR_OBJECT_ID = 16L; private static final long BUCKET_FIVE_OBJECT_ID = 7L; + private static final long BUCKET_SIX_OBJECT_ID = 12L; private static final long KEY_ONE_OBJECT_ID = 3L; private static final long KEY_TWO_OBJECT_ID = 5L; private static final long KEY_THREE_OBJECT_ID = 8L; @@ -184,6 +194,8 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { private static final long KEY_NINE_OBJECT_ID = 19L; private static final long KEY_TEN_OBJECT_ID = 20L; private static final long KEY_ELEVEN_OBJECT_ID = 21L; + private static final long KEY_TWELVE_OBJECT_ID = 22L; + private static final long KEY_THIRTEEN_OBJECT_ID = 23L; private static final long MULTI_BLOCK_KEY_OBJECT_ID = 13L; // container IDs @@ -222,6 +234,8 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { private static final long FILE_NINE_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 private static final long FILE_TEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 private static final long FILE_ELEVEN_SIZE = OzoneConsts.KB + 1; // bin 1 + private static final long FILE_TWELVE_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long FILE_THIRTEEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 private static final long FILE1_SIZE_WITH_REPLICA = getReplicatedSize(FILE_ONE_SIZE, @@ -258,6 +272,12 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { private static final long FILE11_SIZE_WITH_REPLICA = getReplicatedSize(FILE_ELEVEN_SIZE, StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE12_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_TWELVE_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE13_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_THIRTEEN_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); private static final long MULTI_BLOCK_KEY_SIZE_WITH_REPLICA = FILE3_SIZE_WITH_REPLICA; @@ -273,7 +293,9 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { + FILE8_SIZE_WITH_REPLICA + FILE9_SIZE_WITH_REPLICA + FILE10_SIZE_WITH_REPLICA - + FILE11_SIZE_WITH_REPLICA; + + FILE11_SIZE_WITH_REPLICA + + FILE12_SIZE_WITH_REPLICA + + FILE13_SIZE_WITH_REPLICA; private static final long MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL @@ -311,6 +333,7 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { private static final long BUCKET_THREE_QUOTA = OzoneConsts.MB; private static final long BUCKET_FOUR_QUOTA = OzoneConsts.MB; private static final long BUCKET_FIVE_QUOTA = OzoneConsts.MB; + private static final long BUCKET_SIX_QUOTA = OzoneConsts.MB; // mock client's path requests private static final String TEST_USER = "TestUser"; @@ -327,6 +350,8 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_FOUR; private static final String BUCKET_FIVE_PATH = ROOT_PATH + VOL + ROOT_PATH + BUCKET_FIVE; + private static final String BUCKET_SIX_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_SIX; private static final String KEY_ONE_PATH = ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_ONE; private static final String KEY_TWO_PATH = @@ -349,6 +374,10 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { ROOT_PATH + VOL_TWO + 
ROOT_PATH + BUCKET_THREE + ROOT_PATH + KEY_TEN; private static final String KEY_ELEVEN_PATH = ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_FOUR + ROOT_PATH + KEY_ELEVEN; + private static final String KEY_TWELVE_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_SIX + ROOT_PATH + KEY_TWELVE; + private static final String KEY_THIRTEEN_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_SIX + ROOT_PATH + KEY_THIRTEEN; private static final String KEY4_PATH = ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FOUR; private static final String MULTI_BLOCK_KEY_PATH = @@ -358,12 +387,13 @@ public class TestNSSummaryEndpointWithOBSAndLegacy { // some expected answers private static final long ROOT_DATA_SIZE = FILE_ONE_SIZE + FILE_TWO_SIZE + FILE_THREE_SIZE + FILE_FOUR_SIZE + FILE_FIVE_SIZE + FILE_SIX_SIZE + - FILE_SEVEN_SIZE + FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE + FILE_ELEVEN_SIZE; + FILE_SEVEN_SIZE + FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE + FILE_ELEVEN_SIZE + FILE_TWELVE_SIZE + + FILE_THIRTEEN_SIZE; private static final long VOL_DATA_SIZE = FILE_ONE_SIZE + FILE_TWO_SIZE + FILE_THREE_SIZE + FILE_FOUR_SIZE + FILE_FIVE_SIZE + FILE_SIX_SIZE + FILE_SEVEN_SIZE; private static final long VOL_TWO_DATA_SIZE = - FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE + FILE_ELEVEN_SIZE; + FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE + FILE_ELEVEN_SIZE + FILE_TWELVE_SIZE + FILE_THIRTEEN_SIZE; private static final long BUCKET_ONE_DATA_SIZE = FILE_ONE_SIZE + FILE_TWO_SIZE + @@ -449,8 +479,8 @@ public void testGetBasicInfoRoot() throws Exception { (NamespaceSummaryResponse) rootResponse.getEntity(); assertEquals(EntityType.ROOT, rootResponseObj.getEntityType()); assertEquals(2, rootResponseObj.getCountStats().getNumVolume()); - assertEquals(5, rootResponseObj.getCountStats().getNumBucket()); - assertEquals(11, rootResponseObj.getCountStats().getNumTotalKey()); + assertEquals(6, rootResponseObj.getCountStats().getNumBucket()); + assertEquals(13, rootResponseObj.getCountStats().getNumTotalKey()); } @Test @@ -480,8 +510,8 @@ public void testGetBasicInfoVolTwo() throws Exception { (NamespaceSummaryResponse) volTwoResponse.getEntity(); assertEquals(EntityType.VOLUME, volTwoResponseObj.getEntityType()); - assertEquals(2, volTwoResponseObj.getCountStats().getNumBucket()); - assertEquals(4, volTwoResponseObj.getCountStats().getNumTotalKey()); + assertEquals(3, volTwoResponseObj.getCountStats().getNumBucket()); + assertEquals(6, volTwoResponseObj.getCountStats().getNumTotalKey()); assertEquals(TEST_USER, ((VolumeObjectDBInfo) volTwoResponseObj. getObjectDBInfo()).getAdmin()); assertEquals(TEST_USER, ((VolumeObjectDBInfo) volTwoResponseObj. 
@@ -632,7 +662,7 @@ public void testDiskUsageVolTwo() throws Exception { Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_TWO_PATH, false, false); DUResponse duVolRes = (DUResponse) volResponse.getEntity(); - assertEquals(2, duVolRes.getCount()); + assertEquals(3, duVolRes.getCount()); List duData = duVolRes.getDuData(); // sort based on subpath Collections.sort(duData, @@ -897,7 +927,7 @@ public void testQuotaUsage() throws Exception { @Test public void testFileSizeDist() throws Exception { - checkFileSizeDist(ROOT_PATH, 4, 3, 3, 1); + checkFileSizeDist(ROOT_PATH, 4, 3, 5, 1); checkFileSizeDist(VOL_PATH, 4, 1, 1, 1); checkFileSizeDist(BUCKET_ONE_PATH, 1, 1, 0, 1); } @@ -1011,6 +1041,17 @@ public void testListKeysBucketFive() throws Exception { assertEquals(BUCKET_FIVE_DATA_SIZE, duBucketResponse.getSize()); } + @Test + public void testListKeysBucketSix() throws Exception { + // filter list keys under bucketSix based on RATIS ReplicationConfig and key creation date + // creationDate filter passed 1 minute above of KEY6 creation date, so listKeys API will return + // ZERO keys, as one RATIS keys got created after creationDate filter value. + Response bucketResponse = nsSummaryEndpoint.listKeysWithDu("RATIS", + "04-04-2024 12:20:00", 0, BUCKET_SIX_PATH, 10, false); + DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity(); + // There are no sub-paths under this LEGACY bucket. + assertEquals(2, duBucketResponse.getCount()); + } /** * Testing the following case. @@ -1020,15 +1061,21 @@ public void testListKeysBucketFive() throws Exception { * │ ├── file2 * │ └── file3 * └── bucket2 (OBS) - * ├── file4 - * └── file5 + * │ ├── file4 + * │ └── file5 + * └── bucket5 (OBS) + * ├── file6 + * └── file7 * └── vol2 * ├── bucket3 (Legacy) * │ ├── file8 * │ ├── file9 * │ └── file10 * └── bucket4 (Legacy) - * └── file11 + * │ └── file11 + * └── bucket6 (Legacy) + * └── file12 + * └── file13 * * Write these keys to OM and * replicate them. 
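Note: verifyStatsAndAddDURecord, called from the FSO/OBS/Legacy handler hunks above, is not defined anywhere in this part of the series. Below is a minimal sketch of what it presumably does, written only to be consistent with the Stats fields and the pagination assertions in the tests; the exact body, including the use of kv.getKey() as the resume point, is an assumption and not the author's implementation:

    private void verifyStatsAndAddDURecord(List<DUResponse.DiskUsage> duData, Stats stats,
        Table.KeyValue<String, OmKeyInfo> kv, DUResponse.DiskUsage diskUsage) throws IOException {
      if (stats.getLimit() < 0 || stats.getCurrentCount() < stats.getLimit()) {
        // Page not yet full: include this record in the response.
        duData.add(diskUsage);
        stats.setCurrentCount(stats.getCurrentCount() + 1);
      } else if (stats.getLastKey() == null) {
        // Page is full and another matching record exists: remember the resume
        // point. This is also why lastKey stays null on the final page.
        stats.setLastKey(kv.getKey());
      }
      // Every matching record is counted, whether or not it fits on the page.
      stats.setTotalCount(stats.getTotalCount() + 1);
    }

Under this shape, the heatmap call that passes new Stats(-1) simply never pages.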
@@ -1119,6 +1166,33 @@ private void populateOMDB() throws Exception { ratisOne, epochMillis2, true); + writeKeyToOm(reconOMMetadataManager, + KEY_TWELVE, + BUCKET_SIX, + VOL_TWO, + KEY_TWELVE, + KEY_TWELVE_OBJECT_ID, + BUCKET_SIX_OBJECT_ID, + BUCKET_SIX_OBJECT_ID, + VOL_TWO_OBJECT_ID, + FILE_TWELVE_SIZE, + getLegacyBucketLayout(), + ratisOne, + epochMillis1, true); + writeKeyToOm(reconOMMetadataManager, + KEY_THIRTEEN, + BUCKET_SIX, + VOL_TWO, + KEY_THIRTEEN, + KEY_THIRTEEN_OBJECT_ID, + BUCKET_SIX_OBJECT_ID, + BUCKET_SIX_OBJECT_ID, + VOL_TWO_OBJECT_ID, + FILE_THIRTEEN_SIZE, + getLegacyBucketLayout(), + ratisOne, + epochMillis2, true); + writeKeyToOm(reconOMMetadataManager, KEY_EIGHT, BUCKET_THREE, @@ -1241,6 +1315,14 @@ private static OMMetadataManager initializeNewOmMetadataManager( .setBucketName(BUCKET_FIVE) .setObjectID(BUCKET_FIVE_OBJECT_ID) .setQuotaInBytes(BUCKET_FIVE_QUOTA) + .setBucketLayout(getOBSBucketLayout()) + .build(); + + OmBucketInfo bucketInfo6 = OmBucketInfo.newBuilder() + .setVolumeName(VOL_TWO) + .setBucketName(BUCKET_SIX) + .setObjectID(BUCKET_SIX_OBJECT_ID) + .setQuotaInBytes(BUCKET_SIX_QUOTA) .setBucketLayout(getLegacyBucketLayout()) .build(); @@ -1254,12 +1336,15 @@ private static OMMetadataManager initializeNewOmMetadataManager( bucketInfo4.getVolumeName(), bucketInfo4.getBucketName()); String bucketKey5 = omMetadataManager.getBucketKey( bucketInfo5.getVolumeName(), bucketInfo5.getBucketName()); + String bucketKey6 = omMetadataManager.getBucketKey( + bucketInfo6.getVolumeName(), bucketInfo6.getBucketName()); omMetadataManager.getBucketTable().put(bucketKey, bucketInfo); omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2); omMetadataManager.getBucketTable().put(bucketKey3, bucketInfo3); omMetadataManager.getBucketTable().put(bucketKey4, bucketInfo4); omMetadataManager.getBucketTable().put(bucketKey5, bucketInfo5); + omMetadataManager.getBucketTable().put(bucketKey6, bucketInfo6); return omMetadataManager; } From cd4a53d2c406c73053d1fe057cd2c8192d9d79e7 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Tue, 16 Apr 2024 15:44:00 +0530 Subject: [PATCH 06/11] HDDS-10634. Fixed ReplicationType filter logic. --- .../ozone/recon/api/NSSummaryEndpoint.java | 24 ++- .../ozone/recon/api/types/DUResponse.java | 2 +- .../api/TestNSSummaryEndpointWithFSO.java | 2 - .../api/TestNSSummaryEndpointWithLegacy.java | 183 +++++++++++++++++- 4 files changed, 198 insertions(+), 13 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java index 73e4465251a..db99d19a487 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java @@ -151,7 +151,8 @@ public Response getDiskUsage(@QueryParam("path") String path, /** * This API will list out limited 'count' number of keys after applying below filters in API parameters: * Default Values of API param filters: - * -- replicationType - RATIS + * -- replicationType - empty string and filter will not be applied, so list out all keys irrespective of + * replication type. * -- creationTime - empty string and filter will not be applied, so list out keys irrespective of age. * -- keySize - 0 bytes, which means all keys greater than zero bytes will be listed, effectively all. 
* -- startPrefix - / @@ -162,6 +163,7 @@ public Response getDiskUsage(@QueryParam("path") String path, * @param keySize Filter for Keys greater than keySize in bytes. * @param startPrefix Filter for startPrefix path. * @param limit Filter for limited count of keys. + * @param recursive listing out keys recursively for FSO buckets. * @return the list of keys in below structured format: * Response For OBS Bucket keys: * ******************************************************** @@ -171,16 +173,28 @@ public Response getDiskUsage(@QueryParam("path") String path, * "size": 73400320, * "sizeWithReplica": 81788928, * "subPathCount": 1, + * "totalKeyCount": 7, + * "lastKey": "/volume1/obs-bucket/key7", * "subPaths": [ * { * "key": true, - * "path": "key7", + * "path": "key1", * "size": 10485760, * "sizeWithReplica": 18874368, * "isKey": true, - * "replicationType": "EC", + * "replicationType": "RATIS", * "creationTime": 1712321367060, * "modificationTime": 1712321368190 + * }, + * { + * "key": true, + * "path": "key7", + * "size": 10485760, + * "sizeWithReplica": 18874368, + * "isKey": true, + * "replicationType": "EC", + * "creationTime": 1713261005555, + * "modificationTime": 1713261006728 * } * ], * "sizeDirectKey": 73400320 @@ -225,10 +239,10 @@ public Response listKeysWithDu(@QueryParam("replicationType") String replication Predicate keyFilter = keyData -> keyData.isKey(); List filteredKeyList = keyListWithDu.stream() + .filter(keyFilter) .filter(keyData -> !StringUtils.isEmpty(creationDate) ? keyAgeFilter.test(keyData) : true) - .filter(keyData -> keyData.getReplicationType() != null ? keyReplicationFilter.test(keyData) : true) + .filter(keyData -> !StringUtils.isEmpty(replicationType) ? keyReplicationFilter.test(keyData) : true) .filter(keySizeFilter) - .filter(keyFilter) .collect(Collectors.toList()); duResponse.setDuData(filteredKeyList); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DUResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DUResponse.java index 1ad75391027..ad228a736dc 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DUResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DUResponse.java @@ -49,7 +49,7 @@ public class DUResponse { private int count; /** Total number of subpaths/keys under the requested startPrefix path. 
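(Unlike {@code count}, this total is not capped by the page limit.)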
*/ - @JsonProperty("totalCount") + @JsonProperty("totalKeyCount") @JsonInclude(JsonInclude.Include.NON_DEFAULT) private long totalCount; diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java index feb5246191f..b9ed0067081 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java @@ -123,8 +123,6 @@ public class TestNSSummaryEndpointWithFSO { private OzoneConfiguration ozoneConfiguration; private CommonUtils commonUtils; - private static int chunkSize = 1024 * 1024; - private ReplicationConfig ratisOne = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE); private long epochMillis1 = diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java index 765399f71e3..43176d9957e 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java @@ -134,6 +134,7 @@ public class TestNSSummaryEndpointWithLegacy { private static final String BUCKET_TWO = "bucket2"; private static final String BUCKET_THREE = "bucket3"; private static final String BUCKET_FOUR = "bucket4"; + private static final String BUCKET_FIVE = "bucket5"; private static final String KEY_ONE = "file1"; private static final String KEY_TWO = "dir1/dir2/file2"; private static final String KEY_THREE = "dir1/dir3/file3"; @@ -145,6 +146,12 @@ public class TestNSSummaryEndpointWithLegacy { private static final String KEY_NINE = "dir5/file9"; private static final String KEY_TEN = "dir5/file10"; private static final String KEY_ELEVEN = "file11"; + private static final String KEY_TWELVE = "file12"; + private static final String KEY_THIRTEEN = "file13"; + private static final String KEY_FOURTEEN = "file14"; + private static final String KEY_FIFTEEN = "file15"; + private static final String KEY_SIXTEEN = "file16"; + private static final String KEY_SEVENTEEN = "file17"; private static final String MULTI_BLOCK_KEY = "dir1/file7"; private static final String MULTI_BLOCK_FILE = "file7"; @@ -159,12 +166,21 @@ public class TestNSSummaryEndpointWithLegacy { private static final String FILE_NINE = "file9"; private static final String FILE_TEN = "file10"; private static final String FILE_ELEVEN = "file11"; + private static final String FILE_TWELVE = "file12"; + private static final String FILE_THIRTEEN = "file13"; + private static final String FILE_FOURTEEN = "file14"; + private static final String FILE_FIFTEEN = "file15"; + private static final String FILE_SIXTEEN = "file16"; + private static final String FILE_SEVENTEEN = "file17"; private static final String DIR_ONE = "dir1"; private static final String DIR_TWO = "dir2"; private static final String DIR_THREE = "dir3"; private static final String DIR_FOUR = "dir4"; private static final String DIR_FIVE = "dir5"; + private static final String DIR_SIX = "dir6"; + private static final String DIR_SEVEN = "dir7"; + private static final String DIR_EIGHT = "dir8"; // objects IDs private static final long 
PARENT_OBJECT_ID_ZERO = 0L; private static final long VOL_OBJECT_ID = 0L; @@ -190,6 +206,16 @@ public class TestNSSummaryEndpointWithLegacy { private static final long KEY_NINE_OBJECT_ID = 19L; private static final long KEY_TEN_OBJECT_ID = 20L; private static final long KEY_ELEVEN_OBJECT_ID = 21L; + private static final long BUCKET_FIVE_OBJECT_ID = 22L; + private static final long DIR_SIX_OBJECT_ID = 23L; + private static final long KEY_TWELVE_OBJECT_ID = 24L; + private static final long KEY_THIRTEEN_OBJECT_ID = 25L; + private static final long DIR_SEVEN_OBJECT_ID = 26L; + private static final long KEY_FOURTEEN_OBJECT_ID = 27L; + private static final long KEY_FIFTEEN_OBJECT_ID = 28L; + private static final long DIR_EIGHT_OBJECT_ID = 29L; + private static final long KEY_SIXTEEN_OBJECT_ID = 30L; + private static final long KEY_SEVENTEEN_OBJECT_ID = 31L; // container IDs private static final long CONTAINER_ONE_ID = 1L; @@ -228,6 +254,13 @@ public class TestNSSummaryEndpointWithLegacy { private static final long KEY_TEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 private static final long KEY_ELEVEN_SIZE = OzoneConsts.KB + 1; // bin 1 + private static final long KEY_TWELVE_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long KEY_THIRTEEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long KEY_FOURTEEN_SIZE = OzoneConsts.KB + 1; // bin 1 + private static final long KEY_FIFTEEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long KEY_SIXTEEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long KEY_SEVENTEEN_SIZE = OzoneConsts.KB + 1; // bin 1 + private static final long FILE1_SIZE_WITH_REPLICA = getReplicatedSize(KEY_ONE_SIZE, StandaloneReplicationConfig.getInstance(ONE)); @@ -261,6 +294,24 @@ public class TestNSSummaryEndpointWithLegacy { private static final long FILE11_SIZE_WITH_REPLICA = getReplicatedSize(KEY_ELEVEN_SIZE, StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE12_SIZE_WITH_REPLICA = + getReplicatedSize(KEY_TWELVE_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE13_SIZE_WITH_REPLICA = + getReplicatedSize(KEY_THIRTEEN_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE14_SIZE_WITH_REPLICA = + getReplicatedSize(KEY_FOURTEEN_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE15_SIZE_WITH_REPLICA = + getReplicatedSize(KEY_FIFTEEN_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE16_SIZE_WITH_REPLICA = + getReplicatedSize(KEY_SIXTEEN_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE17_SIZE_WITH_REPLICA = + getReplicatedSize(KEY_SEVENTEEN_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); private static final long MULTI_BLOCK_KEY_SIZE_WITH_REPLICA = FILE7_SIZE_WITH_REPLICA; @@ -276,7 +327,13 @@ public class TestNSSummaryEndpointWithLegacy { + FILE8_SIZE_WITH_REPLICA + FILE9_SIZE_WITH_REPLICA + FILE10_SIZE_WITH_REPLICA - + FILE11_SIZE_WITH_REPLICA; + + FILE11_SIZE_WITH_REPLICA + + FILE12_SIZE_WITH_REPLICA + + FILE13_SIZE_WITH_REPLICA + + FILE14_SIZE_WITH_REPLICA + + FILE15_SIZE_WITH_REPLICA + + FILE16_SIZE_WITH_REPLICA + + FILE17_SIZE_WITH_REPLICA; private static final long MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL @@ -319,6 +376,7 @@ public class TestNSSummaryEndpointWithLegacy { private static final long BUCKET_TWO_QUOTA = OzoneConsts.MB; private static final long BUCKET_THREE_QUOTA = OzoneConsts.MB; private 
static final long BUCKET_FOUR_QUOTA = OzoneConsts.MB; + private static final long BUCKET_FIVE_QUOTA = OzoneConsts.MB; // mock client's path requests private static final String TEST_USER = "TestUser"; @@ -337,13 +395,15 @@ public class TestNSSummaryEndpointWithLegacy { // some expected answers private static final long ROOT_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE + - KEY_THREE_SIZE + KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE + - KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + KEY_ELEVEN_SIZE; + KEY_THREE_SIZE + KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE + KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + + KEY_ELEVEN_SIZE + KEY_TWELVE_SIZE + KEY_THIRTEEN_SIZE + KEY_FOURTEEN_SIZE + KEY_FIFTEEN_SIZE + KEY_SIXTEEN_SIZE + + KEY_SEVENTEEN_SIZE; private static final long VOL_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE + KEY_THREE_SIZE + KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE; private static final long VOL_TWO_DATA_SIZE = - KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + KEY_ELEVEN_SIZE; + KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + KEY_ELEVEN_SIZE + KEY_TWELVE_SIZE + KEY_THIRTEEN_SIZE + + KEY_FOURTEEN_SIZE + KEY_FIFTEEN_SIZE + KEY_SIXTEEN_SIZE + KEY_SEVENTEEN_SIZE; private static final long BUCKET_ONE_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE + KEY_THREE_SIZE + KEY_SIX_SIZE; @@ -673,7 +733,7 @@ public void testQuotaUsage() throws Exception { @Test public void testFileSizeDist() throws Exception { - checkFileSizeDist(ROOT_PATH, 2, 3, 4, 1); + checkFileSizeDist(ROOT_PATH, 2, 5, 8, 1); checkFileSizeDist(VOL_PATH, 2, 1, 2, 1); checkFileSizeDist(BUCKET_ONE_PATH, 1, 1, 1, 1); checkFileSizeDist(DIR_ONE_PATH, 0, 1, 1, 1); @@ -751,6 +811,36 @@ private void populateOMDB() throws Exception { BUCKET_THREE_OBJECT_ID, VOL_TWO_OBJECT_ID, getBucketLayout()); + writeDirToOm(reconOMMetadataManager, + (DIR_SIX + OM_KEY_PREFIX), + BUCKET_FIVE, + VOL_TWO, + DIR_SIX, + DIR_SIX_OBJECT_ID, + PARENT_OBJECT_ID_ZERO, + BUCKET_FIVE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + getBucketLayout()); + writeDirToOm(reconOMMetadataManager, + (DIR_SIX + OM_KEY_PREFIX + DIR_SEVEN + OM_KEY_PREFIX), + BUCKET_FIVE, + VOL_TWO, + DIR_SEVEN, + DIR_SEVEN_OBJECT_ID, + PARENT_OBJECT_ID_ZERO, + BUCKET_FIVE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + getBucketLayout()); + writeDirToOm(reconOMMetadataManager, + (DIR_SIX + OM_KEY_PREFIX + DIR_SEVEN + OM_KEY_PREFIX + DIR_EIGHT + OM_KEY_PREFIX), + BUCKET_FIVE, + VOL_TWO, + DIR_EIGHT, + DIR_EIGHT_OBJECT_ID, + PARENT_OBJECT_ID_ZERO, + BUCKET_FIVE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + getBucketLayout()); // write all keys writeKeyToOm(reconOMMetadataManager, @@ -863,6 +953,78 @@ private void populateOMDB() throws Exception { VOL_TWO_OBJECT_ID, KEY_ELEVEN_SIZE, getBucketLayout()); + + writeKeyToOm(reconOMMetadataManager, + KEY_TWELVE, + BUCKET_FIVE, + VOL_TWO, + FILE_TWELVE, + KEY_TWELVE_OBJECT_ID, + PARENT_OBJECT_ID_ZERO, + BUCKET_FIVE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + KEY_TWELVE_SIZE, + getBucketLayout()); + + writeKeyToOm(reconOMMetadataManager, + KEY_THIRTEEN, + BUCKET_FIVE, + VOL_TWO, + FILE_THIRTEEN, + KEY_THIRTEEN_OBJECT_ID, + PARENT_OBJECT_ID_ZERO, + BUCKET_FIVE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + KEY_THIRTEEN_SIZE, + getBucketLayout()); + + writeKeyToOm(reconOMMetadataManager, + KEY_FOURTEEN, + BUCKET_FIVE, + VOL_TWO, + FILE_FOURTEEN, + KEY_FOURTEEN_OBJECT_ID, + PARENT_OBJECT_ID_ZERO, + BUCKET_FIVE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + KEY_FOURTEEN_SIZE, + getBucketLayout()); + + writeKeyToOm(reconOMMetadataManager, + KEY_FIFTEEN, + BUCKET_FIVE, + VOL_TWO, + FILE_FIFTEEN, + KEY_FIFTEEN_OBJECT_ID, + 
PARENT_OBJECT_ID_ZERO, + BUCKET_FIVE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + KEY_FIFTEEN_SIZE, + getBucketLayout()); + + writeKeyToOm(reconOMMetadataManager, + KEY_SIXTEEN, + BUCKET_FIVE, + VOL_TWO, + FILE_SIXTEEN, + KEY_SIXTEEN_OBJECT_ID, + PARENT_OBJECT_ID_ZERO, + BUCKET_FIVE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + KEY_SIXTEEN_SIZE, + getBucketLayout()); + + writeKeyToOm(reconOMMetadataManager, + KEY_SEVENTEEN, + BUCKET_FIVE, + VOL_TWO, + FILE_SEVENTEEN, + KEY_SEVENTEEN_OBJECT_ID, + PARENT_OBJECT_ID_ZERO, + BUCKET_FIVE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + KEY_SEVENTEEN_SIZE, + getBucketLayout()); } /** @@ -936,6 +1098,14 @@ private static OMMetadataManager initializeNewOmMetadataManager( .setBucketLayout(getBucketLayout()) .build(); + OmBucketInfo bucketInfo5 = OmBucketInfo.newBuilder() + .setVolumeName(VOL_TWO) + .setBucketName(BUCKET_FIVE) + .setObjectID(BUCKET_FIVE_OBJECT_ID) + .setQuotaInBytes(BUCKET_FIVE_QUOTA) + .setBucketLayout(getBucketLayout()) + .build(); + String bucketKey = omMetadataManager.getBucketKey( bucketInfo.getVolumeName(), bucketInfo.getBucketName()); String bucketKey2 = omMetadataManager.getBucketKey( @@ -944,11 +1114,14 @@ private static OMMetadataManager initializeNewOmMetadataManager( bucketInfo3.getVolumeName(), bucketInfo3.getBucketName()); String bucketKey4 = omMetadataManager.getBucketKey( bucketInfo4.getVolumeName(), bucketInfo4.getBucketName()); + String bucketKey5 = omMetadataManager.getBucketKey( + bucketInfo5.getVolumeName(), bucketInfo5.getBucketName()); omMetadataManager.getBucketTable().put(bucketKey, bucketInfo); omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2); omMetadataManager.getBucketTable().put(bucketKey3, bucketInfo3); omMetadataManager.getBucketTable().put(bucketKey4, bucketInfo4); + omMetadataManager.getBucketTable().put(bucketKey5, bucketInfo5); return omMetadataManager; } From 18d69911d2f2752eb44fe6f37d3f47b350daaeb5 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Wed, 17 Apr 2024 11:30:28 +0530 Subject: [PATCH 07/11] HDDS-10634. Added pagination test case. --- .../recon/api/handlers/KeyEntityHandler.java | 20 ++++++++++++++--- ...TestNSSummaryEndpointWithOBSAndLegacy.java | 22 +++++++++++++++++-- 2 files changed, 37 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/KeyEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/KeyEntityHandler.java index 3a3b1da940d..04eebebec91 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/KeyEntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/KeyEntityHandler.java @@ -33,6 +33,7 @@ import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; import java.io.IOException; +import java.util.ArrayList; /** * Class for handling key entity type. 
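 * With this change, when listFile is requested a key path emits itself as a
 * single DU record, so the listKeys pagination fields apply to key paths too.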
@@ -76,15 +77,28 @@ public DUResponse getDuResponse( throws IOException { DUResponse duResponse = new DUResponse(); duResponse.setPath(getNormalizedPath()); - // DU for key doesn't have subpaths - duResponse.setCount(0); OmKeyInfo keyInfo = getBucketHandler().getKeyInfo(getNames()); - + duResponse.setKeySize(keyInfo.getDataSize()); duResponse.setSize(keyInfo.getDataSize()); if (withReplica) { long keySizeWithReplica = keyInfo.getReplicatedSize(); duResponse.setSizeWithReplica(keySizeWithReplica); } + if (listFile) { + duResponse.setCount(1); + DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage(); + diskUsage.setKey(true); + diskUsage.setSubpath(getNormalizedPath()); + diskUsage.setSize(keyInfo.getDataSize()); + diskUsage.setSizeWithReplica(duResponse.getSizeWithReplica()); + diskUsage.setReplicationType(keyInfo.getReplicationConfig().getReplicationType().name()); + diskUsage.setCreationTime(keyInfo.getCreationTime()); + diskUsage.setModificationTime(keyInfo.getModificationTime()); + ArrayList diskUsages = new ArrayList<>(); + diskUsages.add(diskUsage); + duResponse.setTotalCount(diskUsages.size()); + duResponse.setDuData(diskUsages); + } return duResponse; } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java index 30b4879bbc7..f47b722edd1 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java @@ -94,6 +94,7 @@ import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -1044,8 +1045,6 @@ public void testListKeysBucketFive() throws Exception { @Test public void testListKeysBucketSix() throws Exception { // filter list keys under bucketSix based on RATIS ReplicationConfig and key creation date - // creationDate filter passed 1 minute above of KEY6 creation date, so listKeys API will return - // ZERO keys, as one RATIS keys got created after creationDate filter value. Response bucketResponse = nsSummaryEndpoint.listKeysWithDu("RATIS", "04-04-2024 12:20:00", 0, BUCKET_SIX_PATH, 10, false); DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity(); @@ -1053,6 +1052,25 @@ public void testListKeysBucketSix() throws Exception { assertEquals(2, duBucketResponse.getCount()); } + @Test + public void testListKeysOnPageTwoForBucketSix() throws Exception { + // filter list keys under bucketSix based on RATIS ReplicationConfig and key creation date + Response bucketResponse = nsSummaryEndpoint.listKeysWithDu("", + "04-04-2024 12:20:00", 0, BUCKET_SIX_PATH, 1, false); + DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity(); + // First page of keys under this LEGACY bucket. 
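+ // (count is capped at the page limit of 1, while totalCount still reports both matching keys)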
+ assertEquals(1, duBucketResponse.getCount()); + assertEquals(2, duBucketResponse.getTotalCount()); + + // Second page of keys under this LEGACY bucket since lastKey + Response keyResponse = nsSummaryEndpoint.listKeysWithDu("", + "04-04-2024 12:20:00", 0, duBucketResponse.getLastKey(), 1, false); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(1, duKeyResponse.getCount()); + assertEquals(1, duKeyResponse.getTotalCount()); + assertNull(duKeyResponse.getLastKey()); + } + /** * Testing the following case. * └── vol From 9b210872a79434982ddceabd6f6ab1946423f77f Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Wed, 17 Apr 2024 14:05:49 +0530 Subject: [PATCH 08/11] HDDS-10634. Added pagination test case. --- .../ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java index f47b722edd1..0135300aa24 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java @@ -755,7 +755,7 @@ public void testDiskUsageKey4() throws Exception { Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY4_PATH, true, false, false); DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); - assertEquals(0, duKeyResponse.getCount()); + assertEquals(1, duKeyResponse.getCount()); assertEquals(FILE_FOUR_SIZE, duKeyResponse.getSize()); } From d381ee70294c02fa0759700d2ffb902f73130123 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Tue, 30 Apr 2024 14:28:24 +0530 Subject: [PATCH 09/11] HDDS-10634. Added integration test for NSSummaryEndpoint listKeys API. --- .../ozone/recon/TestNSSummaryEndPoint.java | 322 ++++++++++++++++++ .../ozone/recon/TestReconEndpointUtil.java | 25 ++ .../api/TestNSSummaryEndpointWithLegacy.java | 183 +--------- ...TestNSSummaryEndpointWithOBSAndLegacy.java | 2 +- 4 files changed, 353 insertions(+), 179 deletions(-) create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestNSSummaryEndPoint.java diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestNSSummaryEndPoint.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestNSSummaryEndPoint.java new file mode 100644 index 00000000000..ba50337d2d0 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestNSSummaryEndPoint.java @@ -0,0 +1,322 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.recon; + +import com.fasterxml.jackson.core.JsonProcessingException; +import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.TestDataUtil; +import org.apache.hadoop.ozone.client.BucketArgs; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.io.OzoneInputStream; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.recon.api.types.DUResponse; +import org.apache.hadoop.ozone.recon.scm.ReconNodeManager; +import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; +import org.apache.ozone.test.GenericTestUtils; +import org.apache.ozone.test.LambdaTestUtils; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.slf4j.event.Level; + +import java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_NODE_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_RECON_HEARTBEAT_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADMIN_MONITOR_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_TASK_INTERVAL_DELAY; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * Integration tests for NSSummaryEndPoint APIs. 
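+ * Spins up a MiniOzoneCluster with Recon enabled and exercises the
+ * /api/v1/namespace/listKeys endpoint against FSO, OBS and Legacy buckets.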
+ */
+@Timeout(300)
+class TestNSSummaryEndPoint {
+
+ private static OzoneBucket legacyOzoneBucket;
+ private static OzoneBucket fsoOzoneBucket;
+ private static OzoneBucket obsOzoneBucket;
+ private static final OzoneConfiguration CONF = new OzoneConfiguration();
+ private static MiniOzoneCluster cluster;
+ private static NodeManager scmNodeManager;
+ private static ContainerManager scmContainerManager;
+
+ @BeforeAll
+ static void init() throws Exception {
+ setupConfigKeys();
+ cluster = MiniOzoneCluster.newBuilder(CONF)
+ .setNumDatanodes(5)
+ .includeRecon(true)
+ .build();
+ cluster.waitForClusterToBeReady();
+ GenericTestUtils.setLogLevel(ReconNodeManager.LOG, Level.DEBUG);
+
+ StorageContainerManager scm = cluster.getStorageContainerManager();
+ scmContainerManager = scm.getContainerManager();
+ scmNodeManager = scm.getScmNodeManager();
+
+ ReconStorageContainerManagerFacade reconScm =
+ (ReconStorageContainerManagerFacade)
+ cluster.getReconServer().getReconStorageContainerManager();
+ PipelineManager reconPipelineManager = reconScm.getPipelineManager();
+
+ LambdaTestUtils.await(60000, 5000,
+ () -> (reconPipelineManager.getPipelines().size() >= 4));
+
+ assertThat(scmContainerManager.getContainers()).isEmpty();
+
+ // Verify that all nodes are registered with Recon.
+ NodeManager reconNodeManager = reconScm.getScmNodeManager();
+ assertEquals(scmNodeManager.getAllNodes().size(),
+ reconNodeManager.getAllNodes().size());
+
+ OzoneClient client = cluster.newClient();
+ String volumeName = "vol1";
+ String fsoBucketName = "fso-bucket";
+ String legacyBucketName = "legacy-bucket";
+ String obsBucketName = "obs-bucket";
+
+ // create a volume and an FSO bucket
+ fsoOzoneBucket = TestDataUtil.createVolumeAndBucket(
+ client, volumeName, fsoBucketName, BucketLayout.FILE_SYSTEM_OPTIMIZED);
+
+ BucketArgs bucketArgs = new BucketArgs.Builder()
+ .setBucketLayout(BucketLayout.LEGACY)
+ .build();
+ // create a LEGACY bucket
+ legacyOzoneBucket = TestDataUtil
+ .createBucket(client, volumeName, bucketArgs, legacyBucketName);
+
+ bucketArgs = new BucketArgs.Builder()
+ .setBucketLayout(BucketLayout.OBJECT_STORE)
+ .build();
+ // create an OBS bucket
+ obsOzoneBucket = TestDataUtil
+ .createBucket(client, volumeName, bucketArgs, obsBucketName);
+
+ buildNameSpaceTree(obsOzoneBucket);
+ buildNameSpaceTree(legacyOzoneBucket);
+ buildNameSpaceTree(fsoOzoneBucket);
+ }
+
+ /**
+ * Builds the key tree that the listKeys tests verify at different levels.
+ */ + private static void buildNameSpaceTree(OzoneBucket ozoneBucket) + throws Exception { + LinkedList keys = new LinkedList<>(); + keys.add("/a1/b1/c1111.tx"); + keys.add("/a1/b1/c1222.tx"); + keys.add("/a1/b1/c1333.tx"); + keys.add("/a1/b1/c1444.tx"); + keys.add("/a1/b1/c1555.tx"); + keys.add("/a1/b1/c1/c1.tx"); + keys.add("/a1/b1/c12/c2.tx"); + keys.add("/a1/b1/c12/c3.tx"); + + keys.add("/a1/b2/d1/d11.tx"); + keys.add("/a1/b2/d2/d21.tx"); + keys.add("/a1/b2/d2/d22.tx"); + keys.add("/a1/b2/d3/d31.tx"); + + keys.add("/a1/b3/e1/e11.tx"); + keys.add("/a1/b3/e2/e21.tx"); + keys.add("/a1/b3/e3/e31.tx"); + + createKeys(ozoneBucket, keys); + } + + private static void createKeys(OzoneBucket ozoneBucket, List keys) + throws Exception { + int length = 10; + byte[] input = new byte[length]; + Arrays.fill(input, (byte) 96); + for (String key : keys) { + createKey(ozoneBucket, key, 10, input); + } + } + + private static void createKey(OzoneBucket ozoneBucket, String key, int length, + byte[] input) throws Exception { + + OzoneOutputStream ozoneOutputStream = + ozoneBucket.createKey(key, length); + + ozoneOutputStream.write(input); + ozoneOutputStream.write(input, 0, 10); + ozoneOutputStream.close(); + + // Read the key with given key name. + OzoneInputStream ozoneInputStream = ozoneBucket.readKey(key); + byte[] read = new byte[length]; + ozoneInputStream.read(read, 0, length); + ozoneInputStream.close(); + + assertEquals(new String(input, StandardCharsets.UTF_8), + new String(read, StandardCharsets.UTF_8)); + } + + @AfterAll + static void shutdown() { + if (cluster != null) { + cluster.shutdown(); + } + } + + private static void setupConfigKeys() { + CONF.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, + 100, TimeUnit.MILLISECONDS); + CONF.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); + CONF.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); + CONF.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1, SECONDS); + CONF.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 1, SECONDS); + CONF.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, SECONDS); + CONF.setTimeDuration(HDDS_NODE_REPORT_INTERVAL, 1, SECONDS); + CONF.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS); + CONF.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS); + CONF.setTimeDuration(OZONE_SCM_DATANODE_ADMIN_MONITOR_INTERVAL, + 1, SECONDS); + CONF.setTimeDuration( + ScmConfigKeys.OZONE_SCM_EXPIRED_CONTAINER_REPLICA_OP_SCRUB_INTERVAL, + 1, SECONDS); + CONF.setTimeDuration(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, + 0, SECONDS); + CONF.set(OzoneConfigKeys.OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION, "2s"); + CONF.set(ScmConfigKeys.OZONE_SCM_PIPELINE_SCRUB_INTERVAL, "2s"); + CONF.set(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, "5s"); + + CONF.setTimeDuration(HDDS_RECON_HEARTBEAT_INTERVAL, + 1, TimeUnit.SECONDS); + + CONF.setTimeDuration(OZONE_RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY, + 1, TimeUnit.SECONDS); + CONF.setTimeDuration(OZONE_RECON_OM_SNAPSHOT_TASK_INTERVAL_DELAY, + 2, TimeUnit.SECONDS); + } + + @Test + void testListKeysForFSOBucket() throws Exception { + assertDirectKeysInFSOBucket(); + assertAllKeysRecursivelyInFSOBucket(); + } + + private static void assertDirectKeysInFSOBucket() throws JsonProcessingException, UnsupportedEncodingException { + // assert direct keys inside fsoBucket + DUResponse response = TestReconEndpointUtil.listKeysFromRecon(CONF, "/vol1/fso-bucket", false); + // No direct keys, total count provides all keys recursively in fso-bucket + // but since we passed 
recursive as false, no keys are listed under duData subpaths.
+ assertEquals(0, response.getCount());
+ assertEquals(0, response.getDuData().size());
+ assertEquals(15, response.getTotalCount());
+ }
+
+ private static void assertAllKeysRecursivelyInFSOBucket()
+ throws JsonProcessingException, UnsupportedEncodingException {
+ // assert all keys recursively inside fsoBucket
+ DUResponse response = TestReconEndpointUtil.listKeysFromRecon(CONF, "/vol1/fso-bucket", true);
+ // With recursive as true, all 15 keys in fso-bucket are listed under duData subpaths.
+ assertEquals(15, response.getCount());
+ assertEquals(15, response.getDuData().size());
+ assertEquals(15, response.getTotalCount());
+ assertEquals("vol1/fso-bucket/a1/b1/c12/c3.tx", response.getDuData().get(14).getSubpath());
+ assertEquals(300, response.getSize());
+ assertEquals(900, response.getSizeWithReplica());
+ }
+
+ @Test
+ void testListKeysForOBSBucket() throws Exception {
+ // Both assertions should return the same key count.
+ assertDirectKeysInOBSBucket();
+ assertAllKeysRecursivelyInOBSBucket();
+ }
+
+ private static void assertDirectKeysInOBSBucket() throws JsonProcessingException, UnsupportedEncodingException {
+ // assert direct keys inside obs-bucket
+ DUResponse response = TestReconEndpointUtil.listKeysFromRecon(CONF, "/vol1/obs-bucket", false);
+ assertEquals(15, response.getCount());
+ assertEquals(15, response.getDuData().size());
+ assertEquals(15, response.getTotalCount());
+ }
+
+ private static void assertAllKeysRecursivelyInOBSBucket()
+ throws JsonProcessingException, UnsupportedEncodingException {
+ // assert all keys inside obs-bucket
+ DUResponse response = TestReconEndpointUtil.listKeysFromRecon(CONF, "/vol1/obs-bucket", true);
+
+ assertEquals(15, response.getCount());
+ assertEquals(15, response.getDuData().size());
+ assertEquals(15, response.getTotalCount());
+ assertEquals("/a1/b3/e3/e31.tx", response.getDuData().get(14).getSubpath());
+ assertEquals(300, response.getSize());
+ assertEquals(900, response.getSizeWithReplica());
+ }
+
+ @Test
+ void testListKeysForLegacyBucket() throws Exception {
+ // Both assertions should return the same key count.
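+ // LEGACY keys are listed flat here (like OBS), so recursive and non-recursive
+ // listings are expected to return the same 15 keys.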
+ assertDirectKeysInLegacyBucket(); + assertAllKeysInLegacyBucket(); + } + + private static void assertDirectKeysInLegacyBucket() throws JsonProcessingException, UnsupportedEncodingException { + // assert direct keys inside legacy-bucket + DUResponse response = TestReconEndpointUtil.listKeysFromRecon(CONF, "/vol1/legacy-bucket", false); + assertEquals(15, response.getCount()); + assertEquals(15, response.getDuData().size()); + assertEquals(15, response.getTotalCount()); + } + + private static void assertAllKeysInLegacyBucket() + throws JsonProcessingException, UnsupportedEncodingException { + // assert all keys inside legacy-bucket + DUResponse response = TestReconEndpointUtil.listKeysFromRecon(CONF, "/vol1/legacy-bucket", true); + + assertEquals(15, response.getCount()); + assertEquals(15, response.getDuData().size()); + assertEquals(15, response.getTotalCount()); + assertEquals("/a1/b3/e3/e31.tx", response.getDuData().get(14).getSubpath()); + assertEquals(300, response.getSize()); + assertEquals(900, response.getSizeWithReplica()); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconEndpointUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconEndpointUtil.java index 002de94cb02..d61b35077fd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconEndpointUtil.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconEndpointUtil.java @@ -23,15 +23,18 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.server.http.HttpConfig; import org.apache.hadoop.hdfs.web.URLConnectionFactory; +import org.apache.hadoop.ozone.recon.api.types.DUResponse; import org.apache.hadoop.ozone.recon.api.types.UnhealthyContainersResponse; import org.hadoop.ozone.recon.schema.ContainerSchemaDefinition; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.InputStream; +import java.io.UnsupportedEncodingException; import java.net.ConnectException; import java.net.HttpURLConnection; import java.net.URL; +import java.net.URLEncoder; import java.nio.charset.StandardCharsets; import static java.net.HttpURLConnection.HTTP_CREATED; @@ -57,6 +60,7 @@ public final class TestReconEndpointUtil { private static final String CONTAINER_ENDPOINT = "/api/v1/containers"; private static final String OM_DB_SYNC_ENDPOINT = "/api/v1/triggerdbsync/om"; + private static final String LISTKEYS_ENDPOINT = "/api/v1/namespace/listKeys"; private TestReconEndpointUtil() { } @@ -102,6 +106,27 @@ public static UnhealthyContainersResponse getUnhealthyContainersFromRecon( UnhealthyContainersResponse.class); } + public static DUResponse listKeysFromRecon(OzoneConfiguration conf, String startPrefix, boolean recursive) + throws JsonProcessingException, UnsupportedEncodingException { + String encodedStartPrefix = URLEncoder.encode(startPrefix, "UTF-8"); + String query = "?startPrefix=" + encodedStartPrefix + "&recursive=" + recursive; + + StringBuilder urlBuilder = new StringBuilder(); + urlBuilder.append(getReconWebAddress(conf)) + .append(LISTKEYS_ENDPOINT) + .append(query); + + String listKeysResponse = ""; + try { + listKeysResponse = makeHttpCall(conf, urlBuilder); + } catch (Exception e) { + LOG.error("Error getting list keys response from Recon"); + } + + final ObjectMapper objectMapper = new ObjectMapper(); + return objectMapper.readValue(listKeysResponse, DUResponse.class); + } + public static String 
makeHttpCall(OzoneConfiguration conf, StringBuilder url) throws Exception { diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java index 95ee830118f..a5064ba5bef 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java @@ -134,7 +134,6 @@ public class TestNSSummaryEndpointWithLegacy { private static final String BUCKET_TWO = "bucket2"; private static final String BUCKET_THREE = "bucket3"; private static final String BUCKET_FOUR = "bucket4"; - private static final String BUCKET_FIVE = "bucket5"; private static final String KEY_ONE = "file1"; private static final String KEY_TWO = "dir1/dir2/file2"; private static final String KEY_THREE = "dir1/dir3/file3"; @@ -146,12 +145,6 @@ public class TestNSSummaryEndpointWithLegacy { private static final String KEY_NINE = "dir5/file9"; private static final String KEY_TEN = "dir5/file10"; private static final String KEY_ELEVEN = "file11"; - private static final String KEY_TWELVE = "file12"; - private static final String KEY_THIRTEEN = "file13"; - private static final String KEY_FOURTEEN = "file14"; - private static final String KEY_FIFTEEN = "file15"; - private static final String KEY_SIXTEEN = "file16"; - private static final String KEY_SEVENTEEN = "file17"; private static final String MULTI_BLOCK_KEY = "dir1/file7"; private static final String MULTI_BLOCK_FILE = "file7"; @@ -166,21 +159,12 @@ public class TestNSSummaryEndpointWithLegacy { private static final String FILE_NINE = "file9"; private static final String FILE_TEN = "file10"; private static final String FILE_ELEVEN = "file11"; - private static final String FILE_TWELVE = "file12"; - private static final String FILE_THIRTEEN = "file13"; - private static final String FILE_FOURTEEN = "file14"; - private static final String FILE_FIFTEEN = "file15"; - private static final String FILE_SIXTEEN = "file16"; - private static final String FILE_SEVENTEEN = "file17"; private static final String DIR_ONE = "dir1"; private static final String DIR_TWO = "dir2"; private static final String DIR_THREE = "dir3"; private static final String DIR_FOUR = "dir4"; private static final String DIR_FIVE = "dir5"; - private static final String DIR_SIX = "dir6"; - private static final String DIR_SEVEN = "dir7"; - private static final String DIR_EIGHT = "dir8"; // objects IDs private static final long PARENT_OBJECT_ID_ZERO = 0L; private static final long VOL_OBJECT_ID = 0L; @@ -206,16 +190,6 @@ public class TestNSSummaryEndpointWithLegacy { private static final long KEY_NINE_OBJECT_ID = 19L; private static final long KEY_TEN_OBJECT_ID = 20L; private static final long KEY_ELEVEN_OBJECT_ID = 21L; - private static final long BUCKET_FIVE_OBJECT_ID = 22L; - private static final long DIR_SIX_OBJECT_ID = 23L; - private static final long KEY_TWELVE_OBJECT_ID = 24L; - private static final long KEY_THIRTEEN_OBJECT_ID = 25L; - private static final long DIR_SEVEN_OBJECT_ID = 26L; - private static final long KEY_FOURTEEN_OBJECT_ID = 27L; - private static final long KEY_FIFTEEN_OBJECT_ID = 28L; - private static final long DIR_EIGHT_OBJECT_ID = 29L; - private static final long KEY_SIXTEEN_OBJECT_ID = 30L; - private static final long KEY_SEVENTEEN_OBJECT_ID = 31L; // container IDs private static final long 
CONTAINER_ONE_ID = 1L; @@ -254,13 +228,6 @@ public class TestNSSummaryEndpointWithLegacy { private static final long KEY_TEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 private static final long KEY_ELEVEN_SIZE = OzoneConsts.KB + 1; // bin 1 - private static final long KEY_TWELVE_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 - private static final long KEY_THIRTEEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 - private static final long KEY_FOURTEEN_SIZE = OzoneConsts.KB + 1; // bin 1 - private static final long KEY_FIFTEEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 - private static final long KEY_SIXTEEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 - private static final long KEY_SEVENTEEN_SIZE = OzoneConsts.KB + 1; // bin 1 - private static final long FILE1_SIZE_WITH_REPLICA = getReplicatedSize(KEY_ONE_SIZE, StandaloneReplicationConfig.getInstance(ONE)); @@ -294,24 +261,6 @@ public class TestNSSummaryEndpointWithLegacy { private static final long FILE11_SIZE_WITH_REPLICA = getReplicatedSize(KEY_ELEVEN_SIZE, StandaloneReplicationConfig.getInstance(ONE)); - private static final long FILE12_SIZE_WITH_REPLICA = - getReplicatedSize(KEY_TWELVE_SIZE, - StandaloneReplicationConfig.getInstance(ONE)); - private static final long FILE13_SIZE_WITH_REPLICA = - getReplicatedSize(KEY_THIRTEEN_SIZE, - StandaloneReplicationConfig.getInstance(ONE)); - private static final long FILE14_SIZE_WITH_REPLICA = - getReplicatedSize(KEY_FOURTEEN_SIZE, - StandaloneReplicationConfig.getInstance(ONE)); - private static final long FILE15_SIZE_WITH_REPLICA = - getReplicatedSize(KEY_FIFTEEN_SIZE, - StandaloneReplicationConfig.getInstance(ONE)); - private static final long FILE16_SIZE_WITH_REPLICA = - getReplicatedSize(KEY_SIXTEEN_SIZE, - StandaloneReplicationConfig.getInstance(ONE)); - private static final long FILE17_SIZE_WITH_REPLICA = - getReplicatedSize(KEY_SEVENTEEN_SIZE, - StandaloneReplicationConfig.getInstance(ONE)); private static final long MULTI_BLOCK_KEY_SIZE_WITH_REPLICA = FILE7_SIZE_WITH_REPLICA; @@ -327,13 +276,7 @@ public class TestNSSummaryEndpointWithLegacy { + FILE8_SIZE_WITH_REPLICA + FILE9_SIZE_WITH_REPLICA + FILE10_SIZE_WITH_REPLICA - + FILE11_SIZE_WITH_REPLICA - + FILE12_SIZE_WITH_REPLICA - + FILE13_SIZE_WITH_REPLICA - + FILE14_SIZE_WITH_REPLICA - + FILE15_SIZE_WITH_REPLICA - + FILE16_SIZE_WITH_REPLICA - + FILE17_SIZE_WITH_REPLICA; + + FILE11_SIZE_WITH_REPLICA; private static final long MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL @@ -376,7 +319,6 @@ public class TestNSSummaryEndpointWithLegacy { private static final long BUCKET_TWO_QUOTA = OzoneConsts.MB; private static final long BUCKET_THREE_QUOTA = OzoneConsts.MB; private static final long BUCKET_FOUR_QUOTA = OzoneConsts.MB; - private static final long BUCKET_FIVE_QUOTA = OzoneConsts.MB; // mock client's path requests private static final String TEST_USER = "TestUser"; @@ -395,15 +337,13 @@ public class TestNSSummaryEndpointWithLegacy { // some expected answers private static final long ROOT_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE + - KEY_THREE_SIZE + KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE + KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + - KEY_ELEVEN_SIZE + KEY_TWELVE_SIZE + KEY_THIRTEEN_SIZE + KEY_FOURTEEN_SIZE + KEY_FIFTEEN_SIZE + KEY_SIXTEEN_SIZE + - KEY_SEVENTEEN_SIZE; + KEY_THREE_SIZE + KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE + + KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + KEY_ELEVEN_SIZE; private static final long VOL_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE + KEY_THREE_SIZE + KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE; private static final 
long VOL_TWO_DATA_SIZE = - KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + KEY_ELEVEN_SIZE + KEY_TWELVE_SIZE + KEY_THIRTEEN_SIZE + - KEY_FOURTEEN_SIZE + KEY_FIFTEEN_SIZE + KEY_SIXTEEN_SIZE + KEY_SEVENTEEN_SIZE; + KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + KEY_ELEVEN_SIZE; private static final long BUCKET_ONE_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE + KEY_THREE_SIZE + KEY_SIX_SIZE; @@ -733,7 +673,7 @@ public void testQuotaUsage() throws Exception { @Test public void testFileSizeDist() throws Exception { - checkFileSizeDist(ROOT_PATH, 2, 5, 8, 1); + checkFileSizeDist(ROOT_PATH, 2, 3, 4, 1); checkFileSizeDist(VOL_PATH, 2, 1, 2, 1); checkFileSizeDist(BUCKET_ONE_PATH, 1, 1, 1, 1); checkFileSizeDist(DIR_ONE_PATH, 0, 1, 1, 1); @@ -811,36 +751,6 @@ private void populateOMDB() throws Exception { BUCKET_THREE_OBJECT_ID, VOL_TWO_OBJECT_ID, getBucketLayout()); - writeDirToOm(reconOMMetadataManager, - (DIR_SIX + OM_KEY_PREFIX), - BUCKET_FIVE, - VOL_TWO, - DIR_SIX, - DIR_SIX_OBJECT_ID, - PARENT_OBJECT_ID_ZERO, - BUCKET_FIVE_OBJECT_ID, - VOL_TWO_OBJECT_ID, - getBucketLayout()); - writeDirToOm(reconOMMetadataManager, - (DIR_SIX + OM_KEY_PREFIX + DIR_SEVEN + OM_KEY_PREFIX), - BUCKET_FIVE, - VOL_TWO, - DIR_SEVEN, - DIR_SEVEN_OBJECT_ID, - PARENT_OBJECT_ID_ZERO, - BUCKET_FIVE_OBJECT_ID, - VOL_TWO_OBJECT_ID, - getBucketLayout()); - writeDirToOm(reconOMMetadataManager, - (DIR_SIX + OM_KEY_PREFIX + DIR_SEVEN + OM_KEY_PREFIX + DIR_EIGHT + OM_KEY_PREFIX), - BUCKET_FIVE, - VOL_TWO, - DIR_EIGHT, - DIR_EIGHT_OBJECT_ID, - PARENT_OBJECT_ID_ZERO, - BUCKET_FIVE_OBJECT_ID, - VOL_TWO_OBJECT_ID, - getBucketLayout()); // write all keys writeKeyToOm(reconOMMetadataManager, @@ -953,78 +863,6 @@ private void populateOMDB() throws Exception { VOL_TWO_OBJECT_ID, KEY_ELEVEN_SIZE, getBucketLayout()); - - writeKeyToOm(reconOMMetadataManager, - KEY_TWELVE, - BUCKET_FIVE, - VOL_TWO, - FILE_TWELVE, - KEY_TWELVE_OBJECT_ID, - PARENT_OBJECT_ID_ZERO, - BUCKET_FIVE_OBJECT_ID, - VOL_TWO_OBJECT_ID, - KEY_TWELVE_SIZE, - getBucketLayout()); - - writeKeyToOm(reconOMMetadataManager, - KEY_THIRTEEN, - BUCKET_FIVE, - VOL_TWO, - FILE_THIRTEEN, - KEY_THIRTEEN_OBJECT_ID, - PARENT_OBJECT_ID_ZERO, - BUCKET_FIVE_OBJECT_ID, - VOL_TWO_OBJECT_ID, - KEY_THIRTEEN_SIZE, - getBucketLayout()); - - writeKeyToOm(reconOMMetadataManager, - KEY_FOURTEEN, - BUCKET_FIVE, - VOL_TWO, - FILE_FOURTEEN, - KEY_FOURTEEN_OBJECT_ID, - PARENT_OBJECT_ID_ZERO, - BUCKET_FIVE_OBJECT_ID, - VOL_TWO_OBJECT_ID, - KEY_FOURTEEN_SIZE, - getBucketLayout()); - - writeKeyToOm(reconOMMetadataManager, - KEY_FIFTEEN, - BUCKET_FIVE, - VOL_TWO, - FILE_FIFTEEN, - KEY_FIFTEEN_OBJECT_ID, - PARENT_OBJECT_ID_ZERO, - BUCKET_FIVE_OBJECT_ID, - VOL_TWO_OBJECT_ID, - KEY_FIFTEEN_SIZE, - getBucketLayout()); - - writeKeyToOm(reconOMMetadataManager, - KEY_SIXTEEN, - BUCKET_FIVE, - VOL_TWO, - FILE_SIXTEEN, - KEY_SIXTEEN_OBJECT_ID, - PARENT_OBJECT_ID_ZERO, - BUCKET_FIVE_OBJECT_ID, - VOL_TWO_OBJECT_ID, - KEY_SIXTEEN_SIZE, - getBucketLayout()); - - writeKeyToOm(reconOMMetadataManager, - KEY_SEVENTEEN, - BUCKET_FIVE, - VOL_TWO, - FILE_SEVENTEEN, - KEY_SEVENTEEN_OBJECT_ID, - PARENT_OBJECT_ID_ZERO, - BUCKET_FIVE_OBJECT_ID, - VOL_TWO_OBJECT_ID, - KEY_SEVENTEEN_SIZE, - getBucketLayout()); } /** @@ -1098,14 +936,6 @@ private static OMMetadataManager initializeNewOmMetadataManager( .setBucketLayout(getBucketLayout()) .build(); - OmBucketInfo bucketInfo5 = OmBucketInfo.newBuilder() - .setVolumeName(VOL_TWO) - .setBucketName(BUCKET_FIVE) - .setObjectID(BUCKET_FIVE_OBJECT_ID) - .setQuotaInBytes(BUCKET_FIVE_QUOTA) - 
.setBucketLayout(getBucketLayout()) - .build(); - String bucketKey = omMetadataManager.getBucketKey( bucketInfo.getVolumeName(), bucketInfo.getBucketName()); String bucketKey2 = omMetadataManager.getBucketKey( @@ -1114,14 +944,11 @@ private static OMMetadataManager initializeNewOmMetadataManager( bucketInfo3.getVolumeName(), bucketInfo3.getBucketName()); String bucketKey4 = omMetadataManager.getBucketKey( bucketInfo4.getVolumeName(), bucketInfo4.getBucketName()); - String bucketKey5 = omMetadataManager.getBucketKey( - bucketInfo5.getVolumeName(), bucketInfo5.getBucketName()); omMetadataManager.getBucketTable().put(bucketKey, bucketInfo); omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2); omMetadataManager.getBucketTable().put(bucketKey3, bucketInfo3); omMetadataManager.getBucketTable().put(bucketKey4, bucketInfo4); - omMetadataManager.getBucketTable().put(bucketKey5, bucketInfo5); return omMetadataManager; } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java index 0135300aa24..a318a30d04c 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java @@ -1048,7 +1048,7 @@ public void testListKeysBucketSix() throws Exception { Response bucketResponse = nsSummaryEndpoint.listKeysWithDu("RATIS", "04-04-2024 12:20:00", 0, BUCKET_SIX_PATH, 10, false); DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity(); - // There are no sub-paths under this LEGACY bucket. + // There will be two keys (sub-paths) under this LEGACY bucket. assertEquals(2, duBucketResponse.getCount()); } From 871769fc6ee554dad4783e2d196ae9e6ce5c1703 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Tue, 30 Apr 2024 15:19:43 +0530 Subject: [PATCH 10/11] HDDS-10634. Added integration test for NSSummaryEndpoint listKeys API. --- .../ozone/recon/TestNSSummaryEndPoint.java | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestNSSummaryEndPoint.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestNSSummaryEndPoint.java index ba50337d2d0..6fa5e6b4fc1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestNSSummaryEndPoint.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestNSSummaryEndPoint.java @@ -141,6 +141,36 @@ static void init() throws Exception { /** * Verify listKeys at different levels. + * . + * └── volume + * └── bucket + * └── a1 + * ├── b1 + * │ ├── c1111.tx + * │ ├── c1222.tx + * │ ├── c1333.tx + * │ ├── c1444.tx + * │ ├── c1555.tx + * │ ├── c1 + * │ │ └── c1.tx + * │ └── c12 + * │ ├── c2.tx + * │ └── c3.tx + * ├── b2 + * │ ├── d1 + * │ │ └── d11.tx + * │ ├── d2 + * │ │ ├── d21.tx + * │ │ └── d22.tx + * │ └── d3 + * │ └── d31.tx + * └── b3 + * ├── e1 + * │ └── e11.tx + * ├── e2 + * │ └── e21.tx + * └── e3 + * └── e31.tx */ private static void buildNameSpaceTree(OzoneBucket ozoneBucket) throws Exception { From 6660e7612aa42a342ba2f60cbd38e70009540347 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Sun, 5 May 2024 09:01:09 +0530 Subject: [PATCH 11/11] HDDS-10634. Added scenario for volume path. 
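
For a volume startPrefix, the listing now also reports totalCount and
lastKey: VolumeEntityHandler copies both from the shared Stats object
into the DUResponse it builds from the per-bucket data. Illustrative
request against the listKeys endpoint used by the integration test
(host and port below are placeholders, not part of this change):

  GET http://<recon-host>:<recon-port>/api/v1/namespace/listKeys?startPrefix=/volume1&recursive=true&count=1000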
---
 .../ozone/recon/api/OMDBInsightEndpoint.java  | 111 ++++++++++++++++++
 .../api/handlers/VolumeEntityHandler.java     |   2 +
 2 files changed, 113 insertions(+)

diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
index baa9c522be1..f42f4e1216e 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
@@ -26,9 +26,14 @@
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.recon.ReconUtils;
+import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler;
+import org.apache.hadoop.ozone.recon.api.types.DUResponse;
 import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo;
 import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse;
 import org.apache.hadoop.ozone.recon.api.types.NSSummary;
+import org.apache.hadoop.ozone.recon.api.types.ResponseStatus;
+import org.apache.hadoop.ozone.recon.api.types.Stats;
 import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
 import org.apache.hadoop.ozone.recon.scm.ReconContainerManager;
 import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager;
@@ -53,6 +58,9 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.TimeZone;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
 
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
@@ -60,6 +68,7 @@
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_DIR_TABLE;
 import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT;
+import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_KEY_SIZE;
 import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_LIMIT;
 import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_PREVKEY;
 import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_OPEN_KEY_INCLUDE_FSO;
@@ -701,6 +710,108 @@ private String createPath(OmKeyInfo omKeyInfo) {
         omKeyInfo.getBucketName() + OM_KEY_PREFIX + omKeyInfo.getKeyName();
   }
 
+  /**
+   * This API lists a limited ('count') number of keys after applying the filters passed as API parameters.
+   * Default values of the API parameter filters:
+   * -- replicationType - empty string, so the filter is not applied and keys are listed irrespective of
+   *    replication type.
+   * -- creationDate - empty string, so the filter is not applied and keys are listed irrespective of age.
+   * -- keySize - 0 bytes, so every key larger than zero bytes is listed, effectively all keys.
+   * -- startPrefix - /
+   * -- count - 1000
+   *
+   * @param replicationType Filter for RATIS or EC replicated keys.
+   * @param creationDate Filter for keys created on or after creationDate, in "MM-dd-yyyy HH:mm:ss" format.
+   * @param keySize Filter for keys larger than keySize bytes.
+   * @param startPrefix Filter for the startPrefix path.
+   * @param limit Maximum number of keys to return.
+   * @param recursive Whether to list keys recursively for FSO buckets.
+   * @return the list of keys in the following structured format:
+   * Response for OBS bucket keys:
+   * ********************************************************
+   * {
+   *   "status": "OK",
+   *   "path": "/volume1/obs-bucket/",
+   *   "size": 73400320,
+   *   "sizeWithReplica": 81788928,
+   *   "subPathCount": 1,
+   *   "totalKeyCount": 7,
+   *   "lastKey": "/volume1/obs-bucket/key7",
+   *   "subPaths": [
+   *     {
+   *       "key": true,
+   *       "path": "key1",
+   *       "size": 10485760,
+   *       "sizeWithReplica": 18874368,
+   *       "isKey": true,
+   *       "replicationType": "RATIS",
+   *       "creationTime": 1712321367060,
+   *       "modificationTime": 1712321368190
+   *     },
+   *     {
+   *       "key": true,
+   *       "path": "key7",
+   *       "size": 10485760,
+   *       "sizeWithReplica": 18874368,
+   *       "isKey": true,
+   *       "replicationType": "EC",
+   *       "creationTime": 1713261005555,
+   *       "modificationTime": 1713261006728
+   *     }
+   *   ],
+   *   "sizeDirectKey": 73400320
+   * }
+   * ********************************************************
+   * @throws IOException
+   */
+  @GET
+  @Path("/listKeys")
+  @SuppressWarnings("methodlength")
+  public Response listKeysWithDu(@QueryParam("replicationType") String replicationType,
+                                 @QueryParam("creationDate") String creationDate,
+                                 @DefaultValue(DEFAULT_KEY_SIZE) @QueryParam("keySize") long keySize,
+                                 @DefaultValue(OM_KEY_PREFIX) @QueryParam("startPrefix") String startPrefix,
+                                 @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam("count") long limit,
+                                 @DefaultValue("false") @QueryParam("recursive") boolean recursive)
+      throws IOException {
+
+    if (startPrefix == null || startPrefix.length() == 0) {
+      return Response.status(Response.Status.BAD_REQUEST).build();
+    }
+    DUResponse duResponse = new DUResponse();
+    if (!isInitializationComplete()) {
+      duResponse.setStatus(ResponseStatus.INITIALIZING);
+      return Response.ok(duResponse).build();
+    }
+    EntityHandler handler = EntityHandler.getEntityHandler(
+        reconNamespaceSummaryManager,
+        omMetadataManager, reconSCM, startPrefix);
+
+    Stats stats = new Stats(limit);
+
+    duResponse = handler.getListKeysResponse(stats, recursive);
+
+    List<DUResponse.DiskUsage> keyListWithDu = duResponse.getDuData();
+
+    long epochMillis = ReconUtils.convertToEpochMillis(creationDate, "MM-dd-yyyy HH:mm:ss", TimeZone.getDefault());
+    Predicate<DUResponse.DiskUsage> keyAgeFilter = keyData -> keyData.getCreationTime() >= epochMillis;
+    Predicate<DUResponse.DiskUsage> keyReplicationFilter =
+        keyData -> keyData.getReplicationType().equals(replicationType);
+    Predicate<DUResponse.DiskUsage> keySizeFilter = keyData -> keyData.getSize() > keySize;
+    Predicate<DUResponse.DiskUsage> keyFilter = keyData -> keyData.isKey();
+
+    List<DUResponse.DiskUsage> filteredKeyList = keyListWithDu.stream()
+        .filter(keyFilter)
+        .filter(keyData -> !StringUtils.isEmpty(creationDate) ? keyAgeFilter.test(keyData) : true)
+        .filter(keyData -> !StringUtils.isEmpty(replicationType) ?
keyReplicationFilter.test(keyData) : true) + .filter(keySizeFilter) + .collect(Collectors.toList()); + + duResponse.setDuData(filteredKeyList); + duResponse.setCount(filteredKeyList.size()); + return Response.ok(duResponse).build(); + } + @VisibleForTesting public GlobalStatsDao getDao() { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java index 6ee88a3e34e..b39180f446f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java @@ -146,6 +146,8 @@ public DUResponse getDuResponse( } duResponse.setDuData(bucketDuData); + duResponse.setTotalCount(stats.getTotalCount()); + duResponse.setLastKey(stats.getLastKey()); return duResponse; }
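
A minimal sketch, not part of this patch series, of how a caller might
format a creationDate filter value. listKeys parses the value with the
"MM-dd-yyyy HH:mm:ss" pattern in the server's default timezone (via
ReconUtils.convertToEpochMillis), and keys created at or after that
instant pass the filter. The class and variable names below are
invented for illustration:

    import java.text.SimpleDateFormat;
    import java.util.Date;
    import java.util.TimeZone;

    public final class CreationDateFilterSketch {
      public static void main(String[] args) {
        // Same pattern and timezone the endpoint uses to parse creationDate.
        SimpleDateFormat sdf = new SimpleDateFormat("MM-dd-yyyy HH:mm:ss");
        sdf.setTimeZone(TimeZone.getDefault());
        // Cut-off 24 hours ago: keys created at or after this instant
        // survive the keyAgeFilter predicate.
        Date cutoff = new Date(System.currentTimeMillis() - 24L * 60 * 60 * 1000);
        System.out.println("creationDate=" + sdf.format(cutoff));
      }
    }

The printed value would then be URL-encoded and passed as the
creationDate query parameter, alongside startPrefix, count, keySize,
replicationType and recursive.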