From 637cb913469c5dd022e8c4b8623c200ef9fedf2e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 24 Aug 2024 11:38:09 +0200 Subject: [PATCH 001/106] HDDS-11361. Bump Jersey2 to 2.44 (#7113) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index b5a6323bed9e..d68f3298c32c 100644 --- a/pom.xml +++ b/pom.xml @@ -155,7 +155,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.19.4 - 2.43 + 2.44 1.9.13 From 9dd18f16113ed4cb77167a94aa442fe9de216af5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 24 Aug 2024 16:31:55 +0200 Subject: [PATCH 002/106] HDDS-11362. Bump snappy-java to 1.1.10.6 (#7114) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index d68f3298c32c..0f88d31e6f0b 100644 --- a/pom.xml +++ b/pom.xml @@ -308,7 +308,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.2.1 3.9.8 - 1.1.10.5 + 1.1.10.6 1.2.0 9.40 From 45b7056c689a2cf8782d4fd9950f845cc63c69f4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 24 Aug 2024 17:43:27 +0200 Subject: [PATCH 003/106] HDDS-11363. Bump develocity-maven-extension to 1.22 (#7115) --- .mvn/extensions.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.mvn/extensions.xml b/.mvn/extensions.xml index ac1c913fd502..4e1cfae4489b 100644 --- a/.mvn/extensions.xml +++ b/.mvn/extensions.xml @@ -24,7 +24,7 @@ com.gradle develocity-maven-extension - 1.21.5 + 1.22 com.gradle From 350a340277dbb94f7a3157594d1b705f33c1d195 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 24 Aug 2024 22:01:19 +0200 Subject: [PATCH 004/106] HDDS-11364. Bump jgraphx to 3.9.12 (#7116) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 0f88d31e6f0b..4a8cac1bb50f 100644 --- a/pom.xml +++ b/pom.xml @@ -296,7 +296,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.15.0 2.5.0 1.4.0 - 3.9.8.1 + 3.9.12 5.3.39 3.11.10 From 8f8d8094c72abc2a535b5243c8d1411cc11e7fa4 Mon Sep 17 00:00:00 2001 From: Arafat2198 Date: Mon, 26 Aug 2024 18:44:03 +0530 Subject: [PATCH 005/106] HDDS-11309. 
Increase CONTAINER_STATE Column Length in UNHEALTHY_CONTAINERS to Avoid Truncation (#7071) --- .../schema/ContainerSchemaDefinition.java | 2 +- .../ContainerHealthSchemaManager.java | 4 +- .../recon/fsck/TestContainerHealthTask.java | 92 ++++++++++++++++++- 3 files changed, 93 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java index 4d62ca886cda..7c293ff1861e 100644 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java +++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java @@ -51,7 +51,7 @@ public enum UnHealthyContainerStates { UNDER_REPLICATED, OVER_REPLICATED, MIS_REPLICATED, - ALL_REPLICAS_UNHEALTHY, + ALL_REPLICAS_BAD, NEGATIVE_SIZE // Added new state to track containers with negative sizes } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHealthSchemaManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHealthSchemaManager.java index 0c13376fa526..9ccc09d8d039 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHealthSchemaManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHealthSchemaManager.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.recon.persistence; import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates.UNDER_REPLICATED; -import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates.ALL_REPLICAS_UNHEALTHY; +import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates.ALL_REPLICAS_BAD; import static org.hadoop.ozone.recon.schema.tables.UnhealthyContainersTable.UNHEALTHY_CONTAINERS; import static org.jooq.impl.DSL.count; @@ -76,7 +76,7 @@ public List getUnhealthyContainers( SelectQuery query = dslContext.selectQuery(); query.addFrom(UNHEALTHY_CONTAINERS); if (state != null) { - if (state.equals(ALL_REPLICAS_UNHEALTHY)) { + if (state.equals(ALL_REPLICAS_BAD)) { query.addConditions(UNHEALTHY_CONTAINERS.CONTAINER_STATE .eq(UNDER_REPLICATED.toString())); query.addConditions(UNHEALTHY_CONTAINERS.ACTUAL_REPLICA_COUNT.eq(0)); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java index 8647639dd134..ae46bd8b5b5f 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java @@ -20,9 +20,12 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.assertj.core.api.Assertions.assertThat; -import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates.ALL_REPLICAS_UNHEALTHY; +import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates.ALL_REPLICAS_BAD; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static 
org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.mock; @@ -199,7 +202,7 @@ public void testRun() throws Exception { List unhealthyContainers = containerHealthSchemaManager.getUnhealthyContainers( - ALL_REPLICAS_UNHEALTHY, 0, Integer.MAX_VALUE); + ALL_REPLICAS_BAD, 0, Integer.MAX_VALUE); assertEquals(1, unhealthyContainers.size()); assertEquals(2L, unhealthyContainers.get(0).getContainerId().longValue()); @@ -384,6 +387,91 @@ public void testDeletedContainer() throws Exception { .isGreaterThan(currentTime); } + @Test + public void testAllContainerStateInsertions() { + UnhealthyContainersDao unHealthyContainersTableHandle = + getDao(UnhealthyContainersDao.class); + + ContainerHealthSchemaManager containerHealthSchemaManager = + new ContainerHealthSchemaManager( + getSchemaDefinition(ContainerSchemaDefinition.class), + unHealthyContainersTableHandle); + + // Iterate through each state in the UnHealthyContainerStates enum + for (ContainerSchemaDefinition.UnHealthyContainerStates state : + ContainerSchemaDefinition.UnHealthyContainerStates.values()) { + + // Create a dummy UnhealthyContainer record with the current state + UnhealthyContainers unhealthyContainer = new UnhealthyContainers(); + unhealthyContainer.setContainerId(state.ordinal() + 1L); + + // Set replica counts based on the state + switch (state) { + case MISSING: + case EMPTY_MISSING: + unhealthyContainer.setExpectedReplicaCount(3); + unhealthyContainer.setActualReplicaCount(0); + unhealthyContainer.setReplicaDelta(3); + break; + + case UNDER_REPLICATED: + unhealthyContainer.setExpectedReplicaCount(3); + unhealthyContainer.setActualReplicaCount(1); + unhealthyContainer.setReplicaDelta(2); + break; + + case OVER_REPLICATED: + unhealthyContainer.setExpectedReplicaCount(3); + unhealthyContainer.setActualReplicaCount(4); + unhealthyContainer.setReplicaDelta(-1); + break; + + case MIS_REPLICATED: + case NEGATIVE_SIZE: + unhealthyContainer.setExpectedReplicaCount(3); + unhealthyContainer.setActualReplicaCount(3); + unhealthyContainer.setReplicaDelta(0); + break; + + case ALL_REPLICAS_BAD: + unhealthyContainer.setExpectedReplicaCount(3); + unhealthyContainer.setActualReplicaCount(0); + unhealthyContainer.setReplicaDelta(3); + break; + + default: + fail("Unhandled state: " + state.name() + ". 
Please add this state to the switch case."); + } + + unhealthyContainer.setContainerState(state.name()); + unhealthyContainer.setInStateSince(System.currentTimeMillis()); + + // Try inserting the record and catch any exception that occurs + Exception exception = null; + try { + containerHealthSchemaManager.insertUnhealthyContainerRecords( + Collections.singletonList(unhealthyContainer)); + } catch (Exception e) { + exception = e; + } + + // Assert no exception should be thrown for each state + assertNull(exception, + "Exception was thrown during insertion for state " + state.name() + + ": " + exception); + + // Optionally, verify the record was inserted correctly + List insertedRecords = + unHealthyContainersTableHandle.fetchByContainerId( + state.ordinal() + 1L); + assertFalse(insertedRecords.isEmpty(), + "Record was not inserted for state " + state.name() + "."); + assertEquals(insertedRecords.get(0).getContainerState(), state.name(), + "The inserted container state does not match for state " + + state.name() + "."); + } + } + @Test public void testNegativeSizeContainers() throws Exception { // Setup mock objects and test environment From c07b408bdfa315f3234f8bd32955ead8703d99ae Mon Sep 17 00:00:00 2001 From: Tsz-Wo Nicholas Sze Date: Mon, 26 Aug 2024 14:16:26 -0700 Subject: [PATCH 006/106] HDDS-11208. Change RatisBlockOutputStream to use HDDS-11174. (#7072) --- .../scm/storage/AbstractCommitWatcher.java | 1 - .../hdds/scm/storage/BlockOutputStream.java | 75 ++++++++++--------- .../scm/storage/RatisBlockOutputStream.java | 4 +- 3 files changed, 40 insertions(+), 40 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java index 61bc73420e65..7641de1274d8 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java @@ -124,7 +124,6 @@ XceiverClientReply watchOnLastIndex() throws IOException { * * @param commitIndex log index to watch for * @return minimum commit index replicated to all nodes - * @throws IOException IOException in case watch gets timed out */ CompletableFuture watchForCommitAsync(long commitIndex) { final MemoizedSupplier> supplier diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java index e88b097c4990..ca3e4e53743e 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java @@ -375,10 +375,8 @@ private void doFlushOrWatchIfNeeded() throws IOException { } private void recordWatchForCommitAsync(CompletableFuture putBlockResultFuture) { - recordFlushFuture(watchForCommitAsync(putBlockResultFuture)); - } + final CompletableFuture flushFuture = putBlockResultFuture.thenCompose(x -> watchForCommit(x.commitIndex)); - private void recordFlushFuture(CompletableFuture flushFuture) { Preconditions.checkState(Thread.holdsLock(this)); this.lastFlushFuture = flushFuture; this.allPendingFlushFutures = allPendingFlushFutures.thenCombine(flushFuture, (last, curr) -> null); @@ -444,7 +442,8 @@ public synchronized void writeOnRetry(long len) throws IOException { writeChunk(buffer); putBlockFuture = 
executePutBlock(false, false); } - CompletableFuture watchForCommitAsync = watchForCommitAsync(putBlockFuture); + CompletableFuture watchForCommitAsync = + putBlockFuture.thenCompose(x -> watchForCommit(x.commitIndex)); try { watchForCommitAsync.get(); } catch (InterruptedException e) { @@ -477,33 +476,44 @@ void releaseBuffersOnException() { } /** - * Watch for a specific commit index. + * Send a watch request to wait until the given index became committed. + * When watch is not needed (e.g. EC), this is a NOOP. + * + * @param index the log index to wait for. + * @return the future of the reply. */ - XceiverClientReply sendWatchForCommit(long commitIndex) - throws IOException { - return null; + CompletableFuture sendWatchForCommit(long index) { + return CompletableFuture.completedFuture(null); } - private void watchForCommit(long commitIndex) throws IOException { - checkOpen(); + private CompletableFuture watchForCommit(long commitIndex) { try { - LOG.debug("Entering watchForCommit commitIndex = {}", commitIndex); - final XceiverClientReply reply = sendWatchForCommit(commitIndex); - if (reply != null) { - List dnList = reply.getDatanodes(); - if (!dnList.isEmpty()) { - Pipeline pipe = xceiverClient.getPipeline(); - - LOG.warn("Failed to commit BlockId {} on {}. Failed nodes: {}", - blockID, pipe, dnList); - failedServers.addAll(dnList); - } - } - } catch (IOException ioe) { - setIoException(ioe); - throw getIoException(); + checkOpen(); + } catch (IOException e) { + throw new FlushRuntimeException(e); + } + + LOG.debug("Entering watchForCommit commitIndex = {}", commitIndex); + return sendWatchForCommit(commitIndex) + .thenAccept(this::checkReply) + .exceptionally(e -> { + throw new FlushRuntimeException(setIoException(e)); + }) + .whenComplete((r, e) -> LOG.debug("Leaving watchForCommit commitIndex = {}", commitIndex)); + } + + private void checkReply(XceiverClientReply reply) { + if (reply == null) { + return; + } + final List dnList = reply.getDatanodes(); + if (dnList.isEmpty()) { + return; } - LOG.debug("Leaving watchForCommit commitIndex = {}", commitIndex); + + LOG.warn("Failed to commit BlockId {} on {}. 
Failed nodes: {}", + blockID, xceiverClient.getPipeline(), dnList); + failedServers.addAll(dnList); } void updateCommitInfo(XceiverClientReply reply, List buffers) { @@ -723,16 +733,6 @@ private synchronized CompletableFuture handleFlushInternalSynchronized(boo return lastFlushFuture; } - private CompletableFuture watchForCommitAsync(CompletableFuture putBlockResultFuture) { - return putBlockResultFuture.thenAccept(x -> { - try { - watchForCommit(x.commitIndex); - } catch (IOException e) { - throw new FlushRuntimeException(e); - } - }); - } - @Override public void close() throws IOException { if (xceiverClientFactory != null && xceiverClient != null) { @@ -771,7 +771,7 @@ void validateResponse( } - public void setIoException(Exception e) { + public IOException setIoException(Throwable e) { IOException ioe = getIoException(); if (ioe == null) { IOException exception = new IOException(EXCEPTION_MSG + e.toString(), e); @@ -782,6 +782,7 @@ public void setIoException(Exception e) { "so subsequent request also encounters " + "Storage Container Exception {}", ioe, e); } + return getIoException(); } void cleanup() { diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java index d32c37eba6c3..0f95716bf9a4 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java @@ -102,8 +102,8 @@ void releaseBuffersOnException() { } @Override - XceiverClientReply sendWatchForCommit(long commitIndex) throws IOException { - return commitWatcher.watchForCommit(commitIndex); + CompletableFuture sendWatchForCommit(long index) { + return commitWatcher.watchForCommitAsync(index); } @Override From be34303650fb0c8377beae5447a1b52ce0871b66 Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Mon, 26 Aug 2024 16:05:27 -0700 Subject: [PATCH 007/106] HDDS-9198. Maintain local cache in OMSnapshotPurgeRequest to get updated snapshotInfo and pass the same to OMSnapshotPurgeResponse (#7045) --- .../snapshot/OMSnapshotPurgeRequest.java | 78 +++++++++---------- .../snapshot/OMSnapshotPurgeResponse.java | 8 +- 2 files changed, 36 insertions(+), 50 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java index 2a9cfa6baf0d..9b46aeef4c0f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; @@ -54,6 +55,13 @@ public class OMSnapshotPurgeRequest extends OMClientRequest { private static final Logger LOG = LoggerFactory.getLogger(OMSnapshotPurgeRequest.class); + /** + * This map contains up to date snapshotInfo and works as a local cache for OMSnapshotPurgeRequest. 
+ * Since purge and other updates happen in sequence inside validateAndUpdateCache, we can get updated snapshotInfo + * from this map rather than getting from snapshotInfoTable which creates a deep copy for every get call. + */ + private final Map updatedSnapshotInfos = new HashMap<>(); + public OMSnapshotPurgeRequest(OMRequest omRequest) { super(omRequest); } @@ -80,9 +88,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn try { List snapshotDbKeys = snapshotPurgeRequest .getSnapshotDBKeysList(); - Map updatedSnapInfos = new HashMap<>(); - Map updatedPathPreviousAndGlobalSnapshots = - new HashMap<>(); // Each snapshot purge operation does three things: // 1. Update the deep clean flag for the next active snapshot (So that it can be @@ -92,7 +97,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn // There is no need to take lock for snapshot purge as of now. We can simply rely on OMStateMachine // because it executes transaction sequentially. for (String snapTableKey : snapshotDbKeys) { - SnapshotInfo fromSnapshot = omMetadataManager.getSnapshotInfoTable().get(snapTableKey); + SnapshotInfo fromSnapshot = getUpdatedSnapshotInfo(snapTableKey, omMetadataManager); if (fromSnapshot == null) { // Snapshot may have been purged in the previous iteration of SnapshotDeletingService. LOG.warn("The snapshot {} is not longer in snapshot table, It maybe removed in the previous " + @@ -104,10 +109,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn SnapshotInfo nextSnapshot = SnapshotUtils.getNextActiveSnapshot(fromSnapshot, snapshotChainManager, omSnapshotManager); // Step 1: Update the deep clean flag for the next active snapshot - updateSnapshotInfoAndCache(nextSnapshot, omMetadataManager, trxnLogIndex, updatedSnapInfos); + updateSnapshotInfoAndCache(nextSnapshot, omMetadataManager, trxnLogIndex); // Step 2: Update the snapshot chain. - updateSnapshotChainAndCache(omMetadataManager, fromSnapshot, trxnLogIndex, - updatedPathPreviousAndGlobalSnapshots); + updateSnapshotChainAndCache(omMetadataManager, fromSnapshot, trxnLogIndex); // Remove and close snapshot's RocksDB instance from SnapshotCache. omSnapshotManager.invalidateCacheEntry(fromSnapshot.getSnapshotId()); // Step 3: Purge the snapshot from SnapshotInfoTable cache.
@@ -115,14 +119,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn .addCacheEntry(new CacheKey<>(fromSnapshot.getTableKey()), CacheValue.get(trxnLogIndex)); } - omClientResponse = new OMSnapshotPurgeResponse(omResponse.build(), - snapshotDbKeys, updatedSnapInfos, - updatedPathPreviousAndGlobalSnapshots); + omClientResponse = new OMSnapshotPurgeResponse(omResponse.build(), snapshotDbKeys, updatedSnapshotInfos); omMetrics.incNumSnapshotPurges(); - LOG.info("Successfully executed snapshotPurgeRequest: {{}} along with updating deep clean flags for " + - "snapshots: {} and global and previous for snapshots:{}.", - snapshotPurgeRequest, updatedSnapInfos.keySet(), updatedPathPreviousAndGlobalSnapshots.keySet()); + LOG.info("Successfully executed snapshotPurgeRequest: {{}} along with updating snapshots:{}.", + snapshotPurgeRequest, updatedSnapshotInfos); } catch (IOException ex) { omClientResponse = new OMSnapshotPurgeResponse( createErrorOMResponse(omResponse, ex)); @@ -133,9 +134,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn return omClientResponse; } - private void updateSnapshotInfoAndCache(SnapshotInfo snapInfo, - OmMetadataManagerImpl omMetadataManager, long trxnLogIndex, - Map updatedSnapInfos) throws IOException { + private void updateSnapshotInfoAndCache(SnapshotInfo snapInfo, OmMetadataManagerImpl omMetadataManager, + long trxnLogIndex) throws IOException { if (snapInfo != null) { // Setting next snapshot deep clean to false, Since the // current snapshot is deleted. We can potentially @@ -145,7 +145,7 @@ private void updateSnapshotInfoAndCache(SnapshotInfo snapInfo, // Update table cache first omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(snapInfo.getTableKey()), CacheValue.get(trxnLogIndex, snapInfo)); - updatedSnapInfos.put(snapInfo.getTableKey(), snapInfo); + updatedSnapshotInfos.put(snapInfo.getTableKey(), snapInfo); } } @@ -158,8 +158,7 @@ private void updateSnapshotInfoAndCache(SnapshotInfo snapInfo, private void updateSnapshotChainAndCache( OmMetadataManagerImpl metadataManager, SnapshotInfo snapInfo, - long trxnLogIndex, - Map updatedPathPreviousAndGlobalSnapshots + long trxnLogIndex ) throws IOException { if (snapInfo == null) { return; @@ -198,43 +197,36 @@ private void updateSnapshotChainAndCache( } SnapshotInfo nextPathSnapInfo = - nextPathSnapshotKey != null ? metadataManager.getSnapshotInfoTable().get(nextPathSnapshotKey) : null; + nextPathSnapshotKey != null ? getUpdatedSnapshotInfo(nextPathSnapshotKey, metadataManager) : null; - SnapshotInfo nextGlobalSnapInfo = - nextGlobalSnapshotKey != null ? metadataManager.getSnapshotInfoTable().get(nextGlobalSnapshotKey) : null; - - // Updates next path snapshot's previous snapshot ID if (nextPathSnapInfo != null) { nextPathSnapInfo.setPathPreviousSnapshotId(snapInfo.getPathPreviousSnapshotId()); metadataManager.getSnapshotInfoTable().addCacheEntry( new CacheKey<>(nextPathSnapInfo.getTableKey()), CacheValue.get(trxnLogIndex, nextPathSnapInfo)); - updatedPathPreviousAndGlobalSnapshots - .put(nextPathSnapInfo.getTableKey(), nextPathSnapInfo); } - // Updates next global snapshot's previous snapshot ID - // If both next global and path snapshot are same, it may overwrite - // nextPathSnapInfo.setPathPreviousSnapshotID(), adding this check - // will prevent it. 
- if (nextGlobalSnapInfo != null && nextPathSnapInfo != null && - nextGlobalSnapInfo.getSnapshotId().equals(nextPathSnapInfo.getSnapshotId())) { - nextPathSnapInfo.setGlobalPreviousSnapshotId(snapInfo.getGlobalPreviousSnapshotId()); - metadataManager.getSnapshotInfoTable().addCacheEntry( - new CacheKey<>(nextPathSnapInfo.getTableKey()), - CacheValue.get(trxnLogIndex, nextPathSnapInfo)); - updatedPathPreviousAndGlobalSnapshots - .put(nextPathSnapInfo.getTableKey(), nextPathSnapInfo); - } else if (nextGlobalSnapInfo != null) { - nextGlobalSnapInfo.setGlobalPreviousSnapshotId( - snapInfo.getGlobalPreviousSnapshotId()); + SnapshotInfo nextGlobalSnapInfo = + nextGlobalSnapshotKey != null ? getUpdatedSnapshotInfo(nextGlobalSnapshotKey, metadataManager) : null; + + if (nextGlobalSnapInfo != null) { + nextGlobalSnapInfo.setGlobalPreviousSnapshotId(snapInfo.getGlobalPreviousSnapshotId()); metadataManager.getSnapshotInfoTable().addCacheEntry( new CacheKey<>(nextGlobalSnapInfo.getTableKey()), CacheValue.get(trxnLogIndex, nextGlobalSnapInfo)); - updatedPathPreviousAndGlobalSnapshots - .put(nextGlobalSnapInfo.getTableKey(), nextGlobalSnapInfo); } snapshotChainManager.deleteSnapshot(snapInfo); } + + private SnapshotInfo getUpdatedSnapshotInfo(String snapshotTableKey, OMMetadataManager omMetadataManager) + throws IOException { + SnapshotInfo snapshotInfo = updatedSnapshotInfos.get(snapshotTableKey); + + if (snapshotInfo == null) { + snapshotInfo = omMetadataManager.getSnapshotInfoTable().get(snapshotTableKey); + updatedSnapshotInfos.put(snapshotTableKey, snapshotInfo); + } + return snapshotInfo; + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java index ea9e68cc9ad9..139ce468e53d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java @@ -49,18 +49,15 @@ public class OMSnapshotPurgeResponse extends OMClientResponse { LoggerFactory.getLogger(OMSnapshotPurgeResponse.class); private final List snapshotDbKeys; private final Map updatedSnapInfos; - private final Map updatedPreviousAndGlobalSnapInfos; public OMSnapshotPurgeResponse( @Nonnull OMResponse omResponse, @Nonnull List snapshotDbKeys, - Map updatedSnapInfos, - Map updatedPreviousAndGlobalSnapInfos + Map updatedSnapInfos ) { super(omResponse); this.snapshotDbKeys = snapshotDbKeys; this.updatedSnapInfos = updatedSnapInfos; - this.updatedPreviousAndGlobalSnapInfos = updatedPreviousAndGlobalSnapInfos; } /** @@ -72,7 +69,6 @@ public OMSnapshotPurgeResponse(@Nonnull OMResponse omResponse) { checkStatusNotOK(); this.snapshotDbKeys = null; this.updatedSnapInfos = null; - this.updatedPreviousAndGlobalSnapInfos = null; } @Override @@ -82,8 +78,6 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) omMetadataManager; updateSnapInfo(metadataManager, batchOperation, updatedSnapInfos); - updateSnapInfo(metadataManager, batchOperation, - updatedPreviousAndGlobalSnapInfos); for (String dbKey: snapshotDbKeys) { // Skip the cache here because snapshot is purged from cache in OMSnapshotPurgeRequest. 
SnapshotInfo snapshotInfo = omMetadataManager From 0b75cb00beb6fa7df7467bcc89054d567be39ff5 Mon Sep 17 00:00:00 2001 From: Abhishek Pal <43001336+devabhishekpal@users.noreply.github.com> Date: Tue, 27 Aug 2024 15:00:41 +0530 Subject: [PATCH 008/106] HDDS-11251. Deprecate definitions and remove listTrash and recoverTrash APIs (#7060) --- .../apache/hadoop/ozone/OzoneConfigKeys.java | 4 - .../src/main/resources/ozone-default.xml | 8 - .../ozone/client/protocol/ClientProtocol.java | 34 ----- .../hadoop/ozone/client/rpc/RpcClient.java | 20 --- .../java/org/apache/hadoop/ozone/OmUtils.java | 8 + .../om/protocol/OzoneManagerProtocol.java | 34 ----- ...ManagerProtocolClientSideTranslatorPB.java | 84 ----------- .../apache/hadoop/ozone/om/TestOmMetrics.java | 19 +-- .../src/main/proto/OmClientProtocol.proto | 44 +++--- .../hadoop/ozone/om/OMMetadataManager.java | 30 ---- .../apache/hadoop/ozone/om/KeyManager.java | 19 --- .../hadoop/ozone/om/KeyManagerImpl.java | 22 --- .../org/apache/hadoop/ozone/om/OMMetrics.java | 21 --- .../ozone/om/OmMetadataManagerImpl.java | 21 --- .../apache/hadoop/ozone/om/OzoneManager.java | 34 ----- .../OzoneManagerRequestHandler.java | 28 ---- .../hadoop/ozone/om/TestTrashService.java | 137 ------------------ .../ozone/client/ClientProtocolStub.java | 16 -- 18 files changed, 38 insertions(+), 545 deletions(-) delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index c61502ff4a8e..c2cdb4d0d847 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -535,10 +535,6 @@ public final class OzoneConfigKeys { public static final int OZONE_MANAGER_STRIPED_LOCK_SIZE_DEFAULT = 512; - public static final String OZONE_CLIENT_LIST_TRASH_KEYS_MAX = - "ozone.client.list.trash.keys.max"; - public static final int OZONE_CLIENT_LIST_TRASH_KEYS_MAX_DEFAULT = 1000; - public static final String OZONE_HTTP_BASEDIR = "ozone.http.basedir"; public static final String OZONE_HTTP_POLICY_KEY = diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 20c1bed89be8..e72b718e9f5e 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -3406,14 +3406,6 @@ unhealthy will each have their own limit. - - ozone.client.list.trash.keys.max - 1000 - OZONE, CLIENT - - The maximum number of keys to return for a list trash request. 
- - ozone.http.basedir diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index 16211ebbb8eb..8d9614b554a8 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -59,7 +59,6 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.helpers.S3VolumeContext; import org.apache.hadoop.ozone.om.helpers.TenantStateList; @@ -514,39 +513,6 @@ List listKeys(String volumeName, String bucketName, String keyPrefix, String prevKey, int maxListResult) throws IOException; - /** - * List trash allows the user to list the keys that were marked as deleted, - * but not actually deleted by Ozone Manager. This allows a user to recover - * keys within a configurable window. - * @param volumeName - The volume name, which can also be a wild card - * using '*'. - * @param bucketName - The bucket name, which can also be a wild card - * using '*'. - * @param startKeyName - List keys from a specific key name. - * @param keyPrefix - List keys using a specific prefix. - * @param maxKeys - The number of keys to be returned. This must be below - * the cluster level set by admins. - * @return The list of keys that are deleted from the deleted table. - * @throws IOException - */ - List listTrash(String volumeName, String bucketName, - String startKeyName, String keyPrefix, - int maxKeys) - throws IOException; - - /** - * Recover trash allows the user to recover keys that were marked as deleted, - * but not actually deleted by Ozone Manager. - * @param volumeName - The volume name. - * @param bucketName - The bucket name. - * @param keyName - The key user want to recover. - * @param destinationBucket - The bucket user want to recover to. - * @return The result of recovering operation is success or not. - * @throws IOException - */ - boolean recoverTrash(String volumeName, String bucketName, String keyName, - String destinationBucket) throws IOException; - /** * Get OzoneKey. 
* @param volumeName Name of the Volume diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index bfeb9c1e6c1f..f01fddf40f7d 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -123,7 +123,6 @@ import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.helpers.S3VolumeContext; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; @@ -1771,25 +1770,6 @@ public List listKeys(String volumeName, String bucketName, } } - @Override - public List listTrash(String volumeName, String bucketName, - String startKeyName, String keyPrefix, int maxKeys) throws IOException { - - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - - return ozoneManagerClient.listTrash(volumeName, bucketName, startKeyName, - keyPrefix, maxKeys); - } - - @Override - public boolean recoverTrash(String volumeName, String bucketName, - String keyName, String destinationBucket) throws IOException { - - return ozoneManagerClient.recoverTrash(volumeName, bucketName, keyName, - destinationBucket); - } - @Override public OzoneKeyDetails getKeyDetails( String volumeName, String bucketName, String keyName) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 11f176362a6b..27015d34a35a 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -243,6 +243,10 @@ public static boolean isReadOnly( case ListKeys: case ListKeysLight: case ListTrash: + // ListTrash is deprecated by HDDS-11251. Keeping this in here + // As protobuf currently doesn't support deprecating enum fields + // TODO: Remove once migrated to proto3 and mark fields in proto + // as deprecated case ServiceList: case ListOpenFiles: case ListMultiPartUploadParts: @@ -304,6 +308,10 @@ public static boolean isReadOnly( case AddAcl: case PurgeKeys: case RecoverTrash: + // RecoverTrash is deprecated by HDDS-11251. 
Keeping this in here + // As protobuf currently doesn't support deprecating enum fields + // TODO: Remove once migrated to proto3 and mark fields in proto + // as deprecated case FinalizeUpgrade: case Prepare: case CancelPrepare: diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java index 45922c107cbe..0f01761b17b1 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java @@ -56,7 +56,6 @@ import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.helpers.S3VolumeContext; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; @@ -1055,39 +1054,6 @@ DBUpdates getDBUpdates( OzoneManagerProtocolProtos.DBUpdatesRequest dbUpdatesRequest) throws IOException; - /** - * List trash allows the user to list the keys that were marked as deleted, - * but not actually deleted by Ozone Manager. This allows a user to recover - * keys within a configurable window. - * @param volumeName - The volume name, which can also be a wild card - * using '*'. - * @param bucketName - The bucket name, which can also be a wild card - * using '*'. - * @param startKeyName - List keys from a specific key name. - * @param keyPrefix - List keys using a specific prefix. - * @param maxKeys - The number of keys to be returned. This must be below - * the cluster level set by admins. - * @return The list of keys that are deleted from the deleted table. - * @throws IOException - */ - List listTrash(String volumeName, String bucketName, - String startKeyName, String keyPrefix, int maxKeys) throws IOException; - - /** - * Recover trash allows the user to recover keys that were marked as deleted, - * but not actually deleted by Ozone Manager. - * @param volumeName - The volume name. - * @param bucketName - The bucket name. - * @param keyName - The key user want to recover. - * @param destinationBucket - The bucket user want to recover to. - * @return The result of recovering operation is success or not. 
- * @throws IOException - */ - default boolean recoverTrash(String volumeName, String bucketName, - String keyName, String destinationBucket) throws IOException { - return false; - } - /** * * @param txnApplyWaitTimeoutSeconds Max time in SECONDS to wait for all diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index f70beed5f253..d3e39550dfbc 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -72,7 +72,6 @@ import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.helpers.S3VolumeContext; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; @@ -150,8 +149,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTenantRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTenantResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTrashRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTrashResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListVolumeRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListVolumeResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupFileRequest; @@ -182,8 +179,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RangerBGSyncResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RecoverLeaseRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RecoverLeaseResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RecoverTrashRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RecoverTrashResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RefetchSecretKeyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RefetchSecretKeyResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclRequest; @@ -2442,85 +2437,6 @@ public List listStatus(OmKeyArgs args, boolean recursive, return listStatus(args, recursive, startKey, numEntries, false); } - @Override - public List listTrash(String volumeName, - String bucketName, String startKeyName, String keyPrefix, int maxKeys) - throws IOException { - - Preconditions.checkArgument(Strings.isNullOrEmpty(volumeName), - "The volume name cannot be null or " + - "empty. Please enter a valid volume name or use '*' as a wild card"); - - Preconditions.checkArgument(Strings.isNullOrEmpty(bucketName), - "The bucket name cannot be null or " + - "empty. 
Please enter a valid bucket name or use '*' as a wild card"); - - ListTrashRequest trashRequest = ListTrashRequest.newBuilder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setStartKeyName(startKeyName) - .setKeyPrefix(keyPrefix) - .setMaxKeys(maxKeys) - .build(); - - OMRequest omRequest = createOMRequest(Type.ListTrash) - .setListTrashRequest(trashRequest) - .build(); - - ListTrashResponse trashResponse = - handleError(submitRequest(omRequest)).getListTrashResponse(); - - List deletedKeyList = - new ArrayList<>(trashResponse.getDeletedKeysCount()); - - List list = new ArrayList<>(); - for (OzoneManagerProtocolProtos.RepeatedKeyInfo - repeatedKeyInfo : trashResponse.getDeletedKeysList()) { - RepeatedOmKeyInfo fromProto = - RepeatedOmKeyInfo.getFromProto(repeatedKeyInfo); - list.add(fromProto); - } - deletedKeyList.addAll(list); - - return deletedKeyList; - } - - @Override - public boolean recoverTrash(String volumeName, String bucketName, - String keyName, String destinationBucket) throws IOException { - - Preconditions.checkArgument(Strings.isNullOrEmpty(volumeName), - "The volume name cannot be null or empty. " + - "Please enter a valid volume name."); - - Preconditions.checkArgument(Strings.isNullOrEmpty(bucketName), - "The bucket name cannot be null or empty. " + - "Please enter a valid bucket name."); - - Preconditions.checkArgument(Strings.isNullOrEmpty(keyName), - "The key name cannot be null or empty. " + - "Please enter a valid key name."); - - Preconditions.checkArgument(Strings.isNullOrEmpty(destinationBucket), - "The destination bucket name cannot be null or empty. " + - "Please enter a valid destination bucket name."); - - RecoverTrashRequest.Builder req = RecoverTrashRequest.newBuilder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setDestinationBucket(destinationBucket); - - OMRequest omRequest = createOMRequest(Type.RecoverTrash) - .setRecoverTrashRequest(req) - .build(); - - RecoverTrashResponse recoverResponse = - handleError(submitRequest(omRequest)).getRecoverTrashResponse(); - - return recoverResponse.getResponse(); - } - @Override public long prepareOzoneManager( long txnApplyWaitTimeoutSeconds, long txnApplyCheckIntervalSeconds) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java index 4619af1baa29..0481ee4a867c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java @@ -338,7 +338,6 @@ public void testKeyOps() throws Exception { long initialNumKeyLookup = getLongCounter("NumKeyLookup", omMetrics); long initialNumKeyDeletes = getLongCounter("NumKeyDeletes", omMetrics); long initialNumKeyLists = getLongCounter("NumKeyLists", omMetrics); - long initialNumTrashKeyLists = getLongCounter("NumTrashKeyLists", omMetrics); long initialNumKeys = getLongCounter("NumKeys", omMetrics); long initialNumInitiateMultipartUploads = getLongCounter("NumInitiateMultipartUploads", omMetrics); @@ -346,7 +345,6 @@ public void testKeyOps() throws Exception { long initialNumKeyAllocateFails = getLongCounter("NumKeyAllocateFails", omMetrics); long initialNumKeyLookupFails = getLongCounter("NumKeyLookupFails", omMetrics); long initialNumKeyDeleteFails = getLongCounter("NumKeyDeleteFails", omMetrics); - long initialNumTrashKeyListFails = 
getLongCounter("NumTrashKeyListFails", omMetrics); long initialNumInitiateMultipartUploadFails = getLongCounter("NumInitiateMultipartUploadFails", omMetrics); long initialNumBlockAllocationFails = getLongCounter("NumBlockAllocationFails", omMetrics); long initialNumKeyListFails = getLongCounter("NumKeyListFails", omMetrics); @@ -356,16 +354,15 @@ public void testKeyOps() throws Exception { TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY); OmKeyArgs keyArgs = createKeyArgs(volumeName, bucketName, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)); - doKeyOps(keyArgs); + doKeyOps(keyArgs); // This will perform 7 different operations on the key omMetrics = getMetrics("OMMetrics"); - assertEquals(initialNumKeyOps + 8, getLongCounter("NumKeyOps", omMetrics)); + assertEquals(initialNumKeyOps + 7, getLongCounter("NumKeyOps", omMetrics)); assertEquals(initialNumKeyAllocate + 1, getLongCounter("NumKeyAllocate", omMetrics)); assertEquals(initialNumKeyLookup + 1, getLongCounter("NumKeyLookup", omMetrics)); assertEquals(initialNumKeyDeletes + 1, getLongCounter("NumKeyDeletes", omMetrics)); assertEquals(initialNumKeyLists + 1, getLongCounter("NumKeyLists", omMetrics)); - assertEquals(initialNumTrashKeyLists + 1, getLongCounter("NumTrashKeyLists", omMetrics)); assertEquals(initialNumKeys, getLongCounter("NumKeys", omMetrics)); assertEquals(initialNumInitiateMultipartUploads + 1, getLongCounter("NumInitiateMultipartUploads", omMetrics)); @@ -409,8 +406,6 @@ public void testKeyOps() throws Exception { doThrow(exception).when(mockKm).lookupKey(any(), any(), any()); doThrow(exception).when(mockKm).listKeys( any(), any(), any(), any(), anyInt()); - doThrow(exception).when(mockKm).listTrash( - any(), any(), any(), any(), anyInt()); OmMetadataReader omMetadataReader = (OmMetadataReader) ozoneManager.getOmMetadataReader().get(); HddsWhiteboxTestUtils.setInternalState( @@ -426,19 +421,17 @@ public void testKeyOps() throws Exception { doKeyOps(keyArgs); omMetrics = getMetrics("OMMetrics"); - assertEquals(initialNumKeyOps + 31, getLongCounter("NumKeyOps", omMetrics)); + assertEquals(initialNumKeyOps + 28, getLongCounter("NumKeyOps", omMetrics)); assertEquals(initialNumKeyAllocate + 6, getLongCounter("NumKeyAllocate", omMetrics)); assertEquals(initialNumKeyLookup + 3, getLongCounter("NumKeyLookup", omMetrics)); assertEquals(initialNumKeyDeletes + 4, getLongCounter("NumKeyDeletes", omMetrics)); assertEquals(initialNumKeyLists + 3, getLongCounter("NumKeyLists", omMetrics)); - assertEquals(initialNumTrashKeyLists + 3, getLongCounter("NumTrashKeyLists", omMetrics)); assertEquals(initialNumInitiateMultipartUploads + 3, getLongCounter("NumInitiateMultipartUploads", omMetrics)); assertEquals(initialNumKeyAllocateFails + 1, getLongCounter("NumKeyAllocateFails", omMetrics)); assertEquals(initialNumKeyLookupFails + 1, getLongCounter("NumKeyLookupFails", omMetrics)); assertEquals(initialNumKeyDeleteFails + 1, getLongCounter("NumKeyDeleteFails", omMetrics)); assertEquals(initialNumKeyListFails + 1, getLongCounter("NumKeyListFails", omMetrics)); - assertEquals(initialNumTrashKeyListFails + 1, getLongCounter("NumTrashKeyListFails", omMetrics)); assertEquals(initialNumInitiateMultipartUploadFails + 1, getLongCounter( "NumInitiateMultipartUploadFails", omMetrics)); assertEquals(initialNumKeys + 2, getLongCounter("NumKeys", omMetrics)); @@ -843,12 +836,6 @@ private void doKeyOps(OmKeyArgs keyArgs) { } catch (IOException ignored) { } - try { - 
ozoneManager.listTrash(keyArgs.getVolumeName(), - keyArgs.getBucketName(), null, null, 0); - } catch (IOException ignored) { - } - try { writeClient.deleteKey(keyArgs); } catch (IOException ignored) { diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 9e0f729be40f..eeddc5500527 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -97,8 +97,9 @@ enum Type { ListMultipartUploads = 82; - ListTrash = 91; - RecoverTrash = 92; + // Not used anymore due to HDDS-11251 + ListTrash = 91; // [deprecated = true] + RecoverTrash = 92; // [deprecated = true] RevokeS3Secret = 93; @@ -233,8 +234,9 @@ message OMRequest { optional UpdateGetS3SecretRequest updateGetS3SecretRequest = 82; optional ListMultipartUploadsRequest listMultipartUploadsRequest = 83; - optional ListTrashRequest listTrashRequest = 91; - optional RecoverTrashRequest RecoverTrashRequest = 92; + // Not used anymore due to HDDS-11251 + optional ListTrashRequest listTrashRequest = 91 [deprecated = true]; + optional RecoverTrashRequest RecoverTrashRequest = 92 [deprecated = true]; optional RevokeS3SecretRequest RevokeS3SecretRequest = 93; @@ -362,8 +364,10 @@ message OMResponse { optional ListMultipartUploadsResponse listMultipartUploadsResponse = 82; - optional ListTrashResponse listTrashResponse = 91; - optional RecoverTrashResponse RecoverTrashResponse = 92; + // Not used anymore due to HDDS-11251 + optional ListTrashResponse listTrashResponse = 91 [deprecated = true]; + optional RecoverTrashResponse RecoverTrashResponse = 92 [deprecated = true]; + optional PurgePathsResponse purgePathsResponse = 93 [deprecated = true]; optional PurgeDirectoriesResponse purgeDirectoriesResponse = 108; @@ -548,33 +552,39 @@ enum Status { /** This command acts as a list command for deleted keys that are still present in the deleted table on Ozone Manager. + Not used anymore due to HDDS-11251 */ message ListTrashRequest { - required string volumeName = 1; - required string bucketName = 2; - optional string startKeyName = 3; - optional string keyPrefix = 4; - optional int32 maxKeys = 5; + // option deprecated = true; + required string volumeName = 1 [deprecated = true]; + required string bucketName = 2 [deprecated = true]; + optional string startKeyName = 3 [deprecated = true]; + optional string keyPrefix = 4 [deprecated = true]; + optional int32 maxKeys = 5 [deprecated = true]; } message ListTrashResponse { - repeated RepeatedKeyInfo deletedKeys = 1; + // option deprecated = true; + repeated RepeatedKeyInfo deletedKeys = 1 [deprecated = true]; } /** This command acts as a recover command for deleted keys that are still in deleted table on Ozone Manager. 
+ Not used anymore due to HDDS-11251 */ message RecoverTrashRequest { - required string volumeName = 1; - required string bucketName = 2; - required string keyName = 3; - required string destinationBucket = 4; + // option deprecated = true; + required string volumeName = 1 [deprecated = true]; + required string bucketName = 2 [deprecated = true]; + required string keyName = 3 [deprecated = true]; + required string destinationBucket = 4 [deprecated = true]; } message RecoverTrashResponse { - required bool response = 1; + // option deprecated = true; + required bool response = 1 [deprecated = true]; } message VolumeInfo { diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java index fb34d19a8bb1..cf0819ca527c 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java @@ -262,24 +262,6 @@ ListKeysResult listKeys(String volumeName, int maxKeys) throws IOException; - /** - * List trash allows the user to list the keys that were marked as deleted, - * but not actually deleted by Ozone Manager. This allows a user to recover - * keys within a configurable window. - * @param volumeName - The volume name, which can also be a wild card - * using '*'. - * @param bucketName - The bucket name, which can also be a wild card - * using '*'. - * @param startKeyName - List keys from a specific key name. - * @param keyPrefix - List keys using a specific prefix. - * @param maxKeys - The number of keys to be returned. This must be below - * the cluster level set by admins. - * @return The list of keys that are deleted from the deleted table. - * @throws IOException - */ - List listTrash(String volumeName, String bucketName, - String startKeyName, String keyPrefix, int maxKeys) throws IOException; - /** * Returns snapshot info for volume/bucket snapshot path. * @param volumeName volume name @@ -304,18 +286,6 @@ ListSnapshotResponse listSnapshot( String volumeName, String bucketName, String snapshotPrefix, String prevSnapshot, int maxListResult) throws IOException; - /** - * Recover trash allows the user to recover the keys - * that were marked as deleted, but not actually deleted by Ozone Manager. - * @param volumeName - The volume name. - * @param bucketName - The bucket name. - * @param keyName - The key user want to recover. - * @param destinationBucket - The bucket user want to recover to. - * @return The result of recovering operation is success or not. - */ - boolean recoverTrash(String volumeName, String bucketName, - String keyName, String destinationBucket) throws IOException; - /** * Returns a list of volumes owned by a given user; if user is null, returns * all volumes. 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java index 7a3312c0685a..b7fa5d746fb0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java @@ -28,7 +28,6 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; import org.apache.hadoop.ozone.om.fs.OzoneManagerFS; import org.apache.hadoop.hdds.utils.BackgroundService; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.service.KeyDeletingService; import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; import org.apache.hadoop.ozone.om.service.SnapshotDirectoryCleaningService; @@ -107,24 +106,6 @@ ListKeysResult listKeys(String volumeName, String bucketName, String startKey, String keyPrefix, int maxKeys) throws IOException; - /** - * List trash allows the user to list the keys that were marked as deleted, - * but not actually deleted by Ozone Manager. This allows a user to recover - * keys within a configurable window. - * @param volumeName - The volume name, which can also be a wild card - * using '*'. - * @param bucketName - The bucket name, which can also be a wild card - * using '*'. - * @param startKeyName - List keys from a specific key name. - * @param keyPrefix - List keys using a specific prefix. - * @param maxKeys - The number of keys to be returned. This must be below - * the cluster level set by admins. - * @return The list of keys that are deleted from the deleted table. - * @throws IOException - */ - List listTrash(String volumeName, String bucketName, - String startKeyName, String keyPrefix, int maxKeys) throws IOException; - /** * Returns a PendingKeysDeletion. 
It has a list of pending deletion key info * that ups to the given count.Each entry is a {@link BlockGroup}, which diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 2cb55135294a..6d276d95284e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -85,7 +85,6 @@ import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; @@ -122,8 +121,6 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_LIST_TRASH_KEYS_MAX; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_LIST_TRASH_KEYS_MAX_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; @@ -182,7 +179,6 @@ public class KeyManagerImpl implements KeyManager { private final ScmClient scmClient; private final OMMetadataManager metadataManager; private final long scmBlockSize; - private final int listTrashKeysMax; private final OzoneBlockTokenSecretManager secretManager; private final boolean grpcBlockTokenEnabled; @@ -218,9 +214,6 @@ public KeyManagerImpl(OzoneManager om, ScmClient scmClient, this.grpcBlockTokenEnabled = conf.getBoolean( HDDS_BLOCK_TOKEN_ENABLED, HDDS_BLOCK_TOKEN_ENABLED_DEFAULT); - this.listTrashKeysMax = conf.getInt( - OZONE_CLIENT_LIST_TRASH_KEYS_MAX, - OZONE_CLIENT_LIST_TRASH_KEYS_MAX_DEFAULT); this.enableFileSystemPaths = conf.getBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT); @@ -660,21 +653,6 @@ public ListKeysResult listKeys(String volumeName, String bucketName, return listKeysResult; } - @Override - public List listTrash(String volumeName, - String bucketName, String startKeyName, String keyPrefix, - int maxKeys) throws IOException { - - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - Preconditions.checkArgument(maxKeys <= listTrashKeysMax, - "The max keys limit specified is not less than the cluster " + - "allowed maximum limit."); - - return metadataManager.listTrash(volumeName, bucketName, - startKeyName, keyPrefix, maxKeys); - } - @Override public PendingKeysDeletion getPendingDeletionKeys(final int count) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java index 1c0ec78cfb22..cbe5205c10ba 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java @@ -61,7 +61,6 @@ public class OMMetrics implements OmMetadataReaderMetrics { private @Metric MutableCounterLong numKeyDeletes; private @Metric MutableCounterLong numBucketLists; private @Metric MutableCounterLong numKeyLists; - private @Metric MutableCounterLong numTrashKeyLists; private @Metric MutableCounterLong numVolumeLists; private @Metric MutableCounterLong numKeyCommits; private @Metric MutableCounterLong numKeyHSyncs; @@ -120,7 +119,6 @@ public class OMMetrics implements OmMetadataReaderMetrics { private @Metric MutableCounterLong numKeyDeleteFails; private @Metric MutableCounterLong numBucketListFails; private @Metric MutableCounterLong numKeyListFails; - private @Metric MutableCounterLong numTrashKeyListFails; private @Metric MutableCounterLong numVolumeListFails; private @Metric MutableCounterLong numKeyCommitFails; private @Metric MutableCounterLong numBlockAllocationFails; @@ -420,11 +418,6 @@ public void incNumKeyLists() { numKeyLists.incr(); } - public void incNumTrashKeyLists() { - numKeyOps.incr(); - numTrashKeyLists.incr(); - } - public void incNumVolumeLists() { numVolumeOps.incr(); numVolumeLists.incr(); @@ -836,10 +829,6 @@ public void incNumKeyListFails() { numKeyListFails.incr(); } - public void incNumTrashKeyListFails() { - numTrashKeyListFails.incr(); - } - public void incNumVolumeListFails() { numVolumeListFails.incr(); } @@ -994,11 +983,6 @@ public long getNumKeyLists() { return numKeyLists.value(); } - @VisibleForTesting - public long getNumTrashKeyLists() { - return numTrashKeyLists.value(); - } - @VisibleForTesting public long getNumGetServiceLists() { return numGetServiceLists.value(); @@ -1099,11 +1083,6 @@ public long getNumKeyListFails() { return numKeyListFails.value(); } - @VisibleForTesting - public long getNumTrashKeyListFails() { - return numTrashKeyListFails.value(); - } - @VisibleForTesting public long getNumFSOps() { return numFSOps.value(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 22d2b1e50b2a..ee92dbc2fde9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -1385,15 +1385,6 @@ public ListKeysResult listKeys(String volumeName, String bucketName, return new ListKeysResult(result, isTruncated); } - // TODO: HDDS-2419 - Complete stub below for core logic - @Override - public List listTrash(String volumeName, String bucketName, - String startKeyName, String keyPrefix, int maxKeys) throws IOException { - - List deletedKeys = new ArrayList<>(); - return deletedKeys; - } - @Override public SnapshotInfo getSnapshotInfo(String volumeName, String bucketName, String snapshotName) throws IOException { @@ -1470,18 +1461,6 @@ public ListSnapshotResponse listSnapshot( return new ListSnapshotResponse(snapshotInfos, lastSnapshot); } - @Override - public boolean recoverTrash(String volumeName, String bucketName, - String keyName, String destinationBucket) throws IOException { - - /* TODO: HDDS-2425 and HDDS-2426 - core logic stub would be added in later patch. - */ - - boolean recoverOperation = true; - return recoverOperation; - } - /** * @param userName volume owner, null for listing all volumes. 
*/ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index a514262cae29..2fb15ec3d609 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -172,7 +172,6 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.S3VolumeContext; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx; @@ -2969,39 +2968,6 @@ public ListKeysLightResult listKeysLight(String volumeName, return new ListKeysLightResult(basicKeysList, listKeysResult.isTruncated()); } - @Override - public List listTrash(String volumeName, - String bucketName, String startKeyName, String keyPrefix, int maxKeys) - throws IOException { - boolean auditSuccess = true; - Map auditMap = buildAuditMap(volumeName); - auditMap.put(OzoneConsts.BUCKET, bucketName); - auditMap.put(OzoneConsts.START_KEY, startKeyName); - auditMap.put(OzoneConsts.KEY_PREFIX, keyPrefix); - auditMap.put(OzoneConsts.MAX_KEYS, String.valueOf(maxKeys)); - try { - if (isAclEnabled) { - omMetadataReader.checkAcls(ResourceType.BUCKET, - StoreType.OZONE, ACLType.LIST, - volumeName, bucketName, keyPrefix); - } - metrics.incNumTrashKeyLists(); - return keyManager.listTrash(volumeName, bucketName, - startKeyName, keyPrefix, maxKeys); - } catch (IOException ex) { - metrics.incNumTrashKeyListFails(); - auditSuccess = false; - AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.LIST_TRASH, - auditMap, ex)); - throw ex; - } finally { - if (auditSuccess) { - AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.LIST_TRASH, - auditMap)); - } - } - } - @Override public SnapshotInfo getSnapshotInfo(String volumeName, String bucketName, String snapshotName) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index a5e94689aeed..f6bd7cca139b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -63,7 +63,6 @@ import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx; import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob; @@ -113,8 +112,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysLightResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTenantRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTenantResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTrashRequest; -import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTrashResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListVolumeRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListVolumeResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupKeyRequest; @@ -238,11 +235,6 @@ public OMResponse handleReadRequest(OMRequest request) { request.getListKeysRequest()); responseBuilder.setListKeysLightResponse(listKeysLightResponse); break; - case ListTrash: - ListTrashResponse listTrashResponse = listTrash( - request.getListTrashRequest(), request.getVersion()); - responseBuilder.setListTrashResponse(listTrashResponse); - break; case ListMultiPartUploadParts: MultipartUploadListPartsResponse listPartsResponse = listParts(request.getListMultipartUploadPartsRequest()); @@ -835,26 +827,6 @@ public static OMResponse disallowListKeysWithBucketLayout( return resp; } - private ListTrashResponse listTrash(ListTrashRequest request, - int clientVersion) throws IOException { - - ListTrashResponse.Builder resp = - ListTrashResponse.newBuilder(); - - List deletedKeys = impl.listTrash( - request.getVolumeName(), - request.getBucketName(), - request.getStartKeyName(), - request.getKeyPrefix(), - request.getMaxKeys()); - - for (RepeatedOmKeyInfo key: deletedKeys) { - resp.addDeletedKeys(key.getProto(false, clientVersion)); - } - - return resp.build(); - } - @RequestFeatureValidator( conditions = ValidationCondition.OLDER_CLIENT_REQUESTS, processingPhase = RequestProcessingPhase.POST_PROCESS, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java deleted file mode 100644 index 4f0c15f15e53..000000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.ozone.om; - - -import static org.junit.jupiter.api.Assertions.assertTrue; -import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.server.ServerUtils; -import org.apache.hadoop.hdds.utils.db.DBConfigFromFile; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.ratis.util.ExitUtils; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Collections; - -/** - * Test Key Trash Service. - *
- * This test does the things including: - * 1. UTs for list trash. - * 2. UTs for recover trash. - * 3. UTs for empty trash. - *
- */ -public class TestTrashService { - - @TempDir - private Path tempFolder; - - private KeyManager keyManager; - private OzoneManagerProtocol writeClient; - private OzoneManager om; - private String volumeName; - private String bucketName; - - @BeforeEach - void setup() throws Exception { - ExitUtils.disableSystemExit(); - OzoneConfiguration configuration = new OzoneConfiguration(); - - File folder = tempFolder.toFile(); - if (!folder.exists()) { - assertTrue(folder.mkdirs()); - } - System.setProperty(DBConfigFromFile.CONFIG_DIR, "/"); - ServerUtils.setOzoneMetaDirPath(configuration, folder.toString()); - - OmTestManagers omTestManagers - = new OmTestManagers(configuration); - keyManager = omTestManagers.getKeyManager(); - writeClient = omTestManagers.getWriteClient(); - om = omTestManagers.getOzoneManager(); - volumeName = "volume"; - bucketName = "bucket"; - } - - @AfterEach - public void cleanup() throws Exception { - om.stop(); - } - - @Test - public void testRecoverTrash() throws IOException { - String keyName = "testKey"; - String destinationBucket = "destBucket"; - createAndDeleteKey(keyName); - - boolean recoverOperation = keyManager.getMetadataManager() - .recoverTrash(volumeName, bucketName, keyName, destinationBucket); - assertTrue(recoverOperation); - } - - private void createAndDeleteKey(String keyName) throws IOException { - - OMRequestTestUtils.addVolumeToOM(keyManager.getMetadataManager(), - OmVolumeArgs.newBuilder() - .setOwnerName("owner") - .setAdminName("admin") - .setVolume(volumeName) - .build()); - - OMRequestTestUtils.addBucketToOM(keyManager.getMetadataManager(), - OmBucketInfo.newBuilder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .build()); - - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setAcls(Collections.emptyList()) - .setLocationInfoList(new ArrayList<>()) - .setReplicationConfig(StandaloneReplicationConfig - .getInstance(HddsProtos.ReplicationFactor.ONE)) - .setOwnerName(UserGroupInformation.getCurrentUser().getShortUserName()) - .build(); - - /* Create and delete key in the Key Manager. 
*/ - OpenKeySession session = writeClient.openKey(keyArgs); - writeClient.commitKey(keyArgs, session.getId()); - writeClient.deleteKey(keyArgs); - } - -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java index 41876c6e2454..e3e3537b1c3b 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java @@ -43,7 +43,6 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.helpers.S3VolumeContext; import org.apache.hadoop.ozone.om.helpers.TenantStateList; @@ -301,21 +300,6 @@ public List listKeys(String volumeName, String bucketName, return null; } - @Override - public List listTrash(String volumeName, String bucketName, - String startKeyName, - String keyPrefix, int maxKeys) - throws IOException { - return null; - } - - @Override - public boolean recoverTrash(String volumeName, String bucketName, - String keyName, String destinationBucket) - throws IOException { - return false; - } - @Override public OzoneKeyDetails getKeyDetails(String volumeName, String bucketName, String keyName) throws IOException { From 2e30dc182c9c3e50b0c023f5a4915fa6a88e5eb8 Mon Sep 17 00:00:00 2001 From: Tejaskriya <87555809+Tejaskriya@users.noreply.github.com> Date: Tue, 27 Aug 2024 15:27:39 +0530 Subject: [PATCH 009/106] HDDS-11190. Add --fields option to ldb scan command (#6976) --- .../apache/hadoop/ozone/debug/DBScanner.java | 122 +++++++++++++++++- .../hadoop/ozone/debug/ValueSchema.java | 17 +-- 2 files changed, 126 insertions(+), 13 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java index 0c38fbe33ba1..4653aa3eeb31 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java @@ -55,9 +55,11 @@ import java.io.BufferedWriter; import java.io.IOException; import java.io.PrintWriter; +import java.lang.reflect.Field; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -121,6 +123,11 @@ public class DBScanner implements Callable, SubcommandWithParent { description = "Key at which iteration of the DB ends") private String endKey; + @CommandLine.Option(names = {"--fields"}, + description = "Comma-separated list of fields needed for each value. " + + "eg.) 
\"name,acls.type\" for showing name and type under acls.") + private String fieldsFilter; + @CommandLine.Option(names = {"--dnSchema", "--dn-schema", "-d"}, description = "Datanode DB Schema Version: V1/V2/V3", defaultValue = "V3") @@ -291,7 +298,7 @@ private void processRecords(ManagedRocksIterator iterator, } Future future = threadPool.submit( new Task(dbColumnFamilyDef, batch, logWriter, sequenceId, - withKey, schemaV3)); + withKey, schemaV3, fieldsFilter)); futures.add(future); batch = new ArrayList<>(batchSize); sequenceId++; @@ -299,7 +306,7 @@ private void processRecords(ManagedRocksIterator iterator, } if (!batch.isEmpty()) { Future future = threadPool.submit(new Task(dbColumnFamilyDef, - batch, logWriter, sequenceId, withKey, schemaV3)); + batch, logWriter, sequenceId, withKey, schemaV3, fieldsFilter)); futures.add(future); } @@ -465,22 +472,51 @@ private static class Task implements Callable { private final long sequenceId; private final boolean withKey; private final boolean schemaV3; + private String valueFields; Task(DBColumnFamilyDefinition dbColumnFamilyDefinition, ArrayList batch, LogWriter logWriter, - long sequenceId, boolean withKey, boolean schemaV3) { + long sequenceId, boolean withKey, boolean schemaV3, String valueFields) { this.dbColumnFamilyDefinition = dbColumnFamilyDefinition; this.batch = batch; this.logWriter = logWriter; this.sequenceId = sequenceId; this.withKey = withKey; this.schemaV3 = schemaV3; + this.valueFields = valueFields; + } + + Map getFieldSplit(List fields, Map fieldMap) { + int len = fields.size(); + if (fieldMap == null) { + fieldMap = new HashMap<>(); + } + if (len == 1) { + fieldMap.putIfAbsent(fields.get(0), null); + } else { + Map fieldMapGet = (Map) fieldMap.get(fields.get(0)); + if (fieldMapGet == null) { + fieldMap.put(fields.get(0), getFieldSplit(fields.subList(1, len), null)); + } else { + fieldMap.put(fields.get(0), getFieldSplit(fields.subList(1, len), fieldMapGet)); + } + } + return fieldMap; } @Override public Void call() { try { ArrayList results = new ArrayList<>(batch.size()); + Map fieldsSplitMap = new HashMap<>(); + + if (valueFields != null) { + for (String field : valueFields.split(",")) { + String[] subfields = field.split("\\."); + fieldsSplitMap = getFieldSplit(Arrays.asList(subfields), fieldsSplitMap); + } + } + for (ByteArrayKeyValue byteArrayKeyValue : batch) { StringBuilder sb = new StringBuilder(); if (!(sequenceId == FIRST_SEQUENCE_ID && results.isEmpty())) { @@ -515,16 +551,92 @@ public Void call() { Object o = dbColumnFamilyDefinition.getValueCodec() .fromPersistedFormat(byteArrayKeyValue.getValue()); - sb.append(WRITER.writeValueAsString(o)); + + if (valueFields != null) { + Map filteredValue = new HashMap<>(); + filteredValue.putAll(getFilteredObject(o, dbColumnFamilyDefinition.getValueType(), fieldsSplitMap)); + sb.append(WRITER.writeValueAsString(filteredValue)); + } else { + sb.append(WRITER.writeValueAsString(o)); + } + results.add(sb.toString()); } logWriter.log(results, sequenceId); - } catch (Exception e) { + } catch (IOException e) { exception = true; LOG.error("Exception parse Object", e); } return null; } + + Map getFilteredObject(Object obj, Class clazz, Map fieldsSplitMap) { + Map valueMap = new HashMap<>(); + for (Map.Entry field : fieldsSplitMap.entrySet()) { + try { + Field valueClassField = getRequiredFieldFromAllFields(clazz, field.getKey()); + Object valueObject = valueClassField.get(obj); + Map subfields = (Map) field.getValue(); + + if (subfields == null) { + valueMap.put(field.getKey(), 
valueObject); + } else { + if (Collection.class.isAssignableFrom(valueObject.getClass())) { + List subfieldObjectsList = + getFilteredObjectCollection((Collection) valueObject, subfields); + valueMap.put(field.getKey(), subfieldObjectsList); + } else if (Map.class.isAssignableFrom(valueObject.getClass())) { + Map subfieldObjectsMap = new HashMap<>(); + Map valueObjectMap = (Map) valueObject; + for (Map.Entry ob : valueObjectMap.entrySet()) { + Object subfieldValue; + if (Collection.class.isAssignableFrom(ob.getValue().getClass())) { + subfieldValue = getFilteredObjectCollection((Collection)ob.getValue(), subfields); + } else { + subfieldValue = getFilteredObject(ob.getValue(), ob.getValue().getClass(), subfields); + } + subfieldObjectsMap.put(ob.getKey(), subfieldValue); + } + valueMap.put(field.getKey(), subfieldObjectsMap); + } else { + valueMap.put(field.getKey(), + getFilteredObject(valueObject, valueClassField.getType(), subfields)); + } + } + } catch (NoSuchFieldException ex) { + err().println("ERROR: no such field: " + field); + } catch (IllegalAccessException e) { + err().println("ERROR: Cannot get field from object: " + field); + } + } + return valueMap; + } + + List getFilteredObjectCollection(Collection valueObject, Map fields) + throws NoSuchFieldException, IllegalAccessException { + List subfieldObjectsList = new ArrayList<>(); + for (Object ob : valueObject) { + Object subfieldValue = getFilteredObject(ob, ob.getClass(), fields); + subfieldObjectsList.add(subfieldValue); + } + return subfieldObjectsList; + } + + Field getRequiredFieldFromAllFields(Class clazz, String fieldName) throws NoSuchFieldException { + List classFieldList = ValueSchema.getAllFields(clazz); + Field classField = null; + for (Field f : classFieldList) { + if (f.getName().equals(fieldName)) { + classField = f; + break; + } + } + if (classField == null) { + throw new NoSuchFieldException(); + } + classField.setAccessible(true); + return classField; + } } private static class ByteArrayKeyValue { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ValueSchema.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ValueSchema.java index a5029b3e6b90..b06be2aff534 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ValueSchema.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ValueSchema.java @@ -88,7 +88,7 @@ public Void call() throws Exception { String dbPath = parent.getDbPath(); Map fields = new HashMap<>(); - success = getValueFields(dbPath, fields); + success = getValueFields(dbPath, fields, depth, tableName, dnDBSchemaVersion); out().println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(fields)); @@ -101,7 +101,8 @@ public Void call() throws Exception { return null; } - private boolean getValueFields(String dbPath, Map valueSchema) { + public static boolean getValueFields(String dbPath, Map valueSchema, int d, String table, + String dnDBSchemaVersion) { dbPath = removeTrailingSlashIfNeeded(dbPath); DBDefinitionFactory.setDnDBSchemaVersion(dnDBSchemaVersion); @@ -111,19 +112,19 @@ private boolean getValueFields(String dbPath, Map valueSchema) { return false; } final DBColumnFamilyDefinition columnFamilyDefinition = - dbDefinition.getColumnFamily(tableName); + dbDefinition.getColumnFamily(table); if (columnFamilyDefinition == null) { - err().print("Error: Table with name '" + tableName + "' not found"); + err().print("Error: Table with name '" + table + "' not found"); return false; } Class c = 
columnFamilyDefinition.getValueType(); - valueSchema.put(c.getSimpleName(), getFieldsStructure(c, depth)); + valueSchema.put(c.getSimpleName(), getFieldsStructure(c, d)); return true; } - private Object getFieldsStructure(Class clazz, int currentDepth) { + private static Object getFieldsStructure(Class clazz, int currentDepth) { if (clazz.isPrimitive() || String.class.equals(clazz)) { return clazz.getSimpleName(); } else if (currentDepth == 0) { @@ -148,7 +149,7 @@ private Object getFieldsStructure(Class clazz, int currentDepth) { } } - private List getAllFields(Class clazz) { + public static List getAllFields(Class clazz) { // NOTE: Schema of interface type, like ReplicationConfig, cannot be fetched. // An empty list "[]" will be shown for such types of fields. if (clazz == null) { @@ -176,7 +177,7 @@ public Class getParentType() { return RDBParser.class; } - private String removeTrailingSlashIfNeeded(String dbPath) { + private static String removeTrailingSlashIfNeeded(String dbPath) { if (dbPath.endsWith(OzoneConsts.OZONE_URI_DELIMITER)) { dbPath = dbPath.substring(0, dbPath.length() - 1); } From 2236041f3aa4742917a98e99c54d079e0c243e8d Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Tue, 27 Aug 2024 16:48:47 +0200 Subject: [PATCH 010/106] HDDS-11365. Fix the NOTICE file (#7120) --- NOTICE.txt | 2 +- dev-support/pom.xml | 98 +++++++++++++++++++ hadoop-hdds/pom.xml | 24 +++++ .../dist/src/main/assemblies/ozone-src.xml | 7 ++ hadoop-ozone/pom.xml | 23 +++++ pom.xml | 20 +--- 6 files changed, 154 insertions(+), 20 deletions(-) create mode 100644 dev-support/pom.xml diff --git a/NOTICE.txt b/NOTICE.txt index 7a1e855f6a33..cc4e3c58b39f 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1,5 +1,5 @@ Apache Ozone -Copyright 2022 The Apache Software Foundation +Copyright 2024 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). 
diff --git a/dev-support/pom.xml b/dev-support/pom.xml new file mode 100644 index 000000000000..e11e3b32ee44 --- /dev/null +++ b/dev-support/pom.xml @@ -0,0 +1,98 @@ + + + + + ozone-main + org.apache.ozone + 1.5.0-SNAPSHOT + + 4.0.0 + ozone-dev-support + Helper module for sharing resources among projects + Apache Ozone Dev Support + + + false + + + + + ${project.build.directory}/extra-resources + META-INF + + LICENSE.txt + NOTICE.txt + + + + + + org.apache.maven.plugins + maven-site-plugin + + true + + + + + org.apache.maven.plugins + maven-resources-plugin + + + copy-resources + validate + + copy-resources + + + ${project.build.directory}/extra-resources + + + ../ + + LICENSE.txt + NOTICE.txt + + + + + + + + + + org.apache.maven.plugins + maven-remote-resources-plugin + + + process-resources + + bundle + + + + + ${project.build.outputDirectory} + + META-INF/LICENSE.txt + META-INF/NOTICE.txt + + + + + + diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index a863fe3ef5d1..87d761583014 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -247,6 +247,30 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + + org.apache.maven.plugins + maven-remote-resources-plugin + + + org.apache.ozone:ozone-dev-support:${ozone.version} + + + + + org.apache.ozone + ozone-dev-support + ${ozone.version} + + + + + + process + + + + diff --git a/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml b/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml index 3c97d3add766..3450b3873937 100644 --- a/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml +++ b/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml @@ -67,6 +67,13 @@ dev-support true + + **/.classpath + **/.project + **/.settings + **/*.iml + **/target/** + hadoop-hdds diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index e262895664fc..d91d488c4345 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -342,6 +342,29 @@ + + org.apache.maven.plugins + maven-remote-resources-plugin + + + org.apache.ozone:ozone-dev-support:${ozone.version} + + + + + org.apache.ozone + ozone-dev-support + ${ozone.version} + + + + + + process + + + + diff --git a/pom.xml b/pom.xml index 4a8cac1bb50f..264cd993b804 100644 --- a/pom.xml +++ b/pom.xml @@ -24,6 +24,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs pom + dev-support hadoop-hdds hadoop-ozone @@ -1708,25 +1709,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.apache.maven.plugins maven-remote-resources-plugin ${maven-remote-resources-plugin.version} - - - org.apache.hadoop:hadoop-build-tools:${hadoop.version} - - - - - org.apache.hadoop - hadoop-build-tools - ${hadoop.version} - - - - - - process - - - org.apache.maven.plugins From fab56b44a003b0085af244df566e71b3d8569e04 Mon Sep 17 00:00:00 2001 From: Smita <112169209+smitajoshi12@users.noreply.github.com> Date: Wed, 28 Aug 2024 01:21:32 +0530 Subject: [PATCH 011/106] HDDS-11229. 
Chain optionals in Recon Insight (#7064) --- .../ozone-recon-web/src/views/insights/insights.tsx | 8 +++----- .../recon/ozone-recon-web/src/views/insights/om/om.tsx | 10 +++++----- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/insights.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/insights.tsx index f273f758ea96..63f095ff7cae 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/insights.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/insights.tsx @@ -101,9 +101,7 @@ export class Insights extends React.Component, IInsightsS // Disable bucket selection dropdown if more than one volume is selected // If there is only one volume, bucket selection dropdown should not be disabled. const isBucketSelectionDisabled = !selectedVolumes || - (selectedVolumes && - (selectedVolumes.length > 1 && - (volumeBucketMap.size !== 1))); + (selectedVolumes?.length > 1 && volumeBucketMap.size !== 1); let bucketOptions: IOption[] = []; // When volume is changed and more than one volume is selected, // selected buckets value should be reset to all buckets @@ -455,7 +453,7 @@ export class Insights extends React.Component, IInsightsS
{isLoading ? Loading... : - ((fileCountsResponse && fileCountsResponse.length > 0) ? + ((fileCountsResponse?.length > 0) ?
@@ -506,7 +504,7 @@ export class Insights extends React.Component, IInsightsS
{isLoading ? Loading... : - ((containerCountResponse && containerCountResponse.length > 0) ? + ((containerCountResponse?.length > 0) ?
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx index b56e8d8151ac..fdd25929d03b 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx @@ -530,7 +530,7 @@ export class Om extends React.Component, IOmdbInsightsSta const { request, controller } = AxiosGetHelper(mismatchEndpoint, cancelMismatchedEndpointSignal) cancelMismatchedEndpointSignal = controller; request.then(mismatchContainersResponse => { - const mismatchContainers: IContainerResponse[] = mismatchContainersResponse && mismatchContainersResponse.data && mismatchContainersResponse.data.containerDiscrepancyInfo; + const mismatchContainers: IContainerResponse[] = mismatchContainersResponse?.data?.containerDiscrepancyInfo && []; this.setState({ loading: false, @@ -567,7 +567,7 @@ export class Om extends React.Component, IOmdbInsightsSta const { request, controller } = AxiosGetHelper(openKeysEndpoint, cancelOpenKeysSignal) cancelOpenKeysSignal = controller request.then(openKeysResponse => { - const openKeys = openKeysResponse && openKeysResponse.data; + const openKeys = openKeysResponse?.data ?? {"fso": []}; let allopenKeysResponse: any[] = []; for (let key in openKeys) { if (Array.isArray(openKeys[key])) { @@ -614,7 +614,7 @@ export class Om extends React.Component, IOmdbInsightsSta cancelDeletePendingSignal = controller; request.then(deletePendingKeysResponse => { - const deletePendingKeys = deletePendingKeysResponse && deletePendingKeysResponse.data && deletePendingKeysResponse.data.deletedKeyInfo; + const deletePendingKeys = deletePendingKeysResponse?.data?.deletedKeyInfo ?? []; //Use Summation Logic iterate through all object and find sum of all datasize let deletedKeyInfoData = []; deletedKeyInfoData = deletePendingKeys && deletePendingKeys.flatMap((infoObject: any) => { @@ -714,7 +714,7 @@ export class Om extends React.Component, IOmdbInsightsSta cancelDeletedKeysSignal = controller request.then(deletedKeysResponse => { let deletedContainerKeys = []; - deletedContainerKeys = deletedKeysResponse && deletedKeysResponse.data && deletedKeysResponse.data.containers; + deletedContainerKeys = deletedKeysResponse?.data?.containers ?? []; this.setState({ loading: false, deletedContainerKeysDataSource: deletedContainerKeys @@ -748,7 +748,7 @@ export class Om extends React.Component, IOmdbInsightsSta cancelDeletedPendingDirSignal = controller request.then(deletePendingDirResponse => { let deletedDirInfo = []; - deletedDirInfo = deletePendingDirResponse && deletePendingDirResponse.data && deletePendingDirResponse.data.deletedDirInfo; + deletedDirInfo = deletePendingDirResponse?.data?.deletedDirInfo ?? []; this.setState({ loading: false, pendingDeleteDirDataSource: deletedDirInfo From 51a5fb9422108e47ea10d7118f1ee9baec4a7e3b Mon Sep 17 00:00:00 2001 From: tanvipenumudy <46785609+tanvipenumudy@users.noreply.github.com> Date: Wed, 28 Aug 2024 03:34:08 +0530 Subject: [PATCH 012/106] Revert "HDDS-11235. Spare InfoBucket RPC call in FileSystem#mkdir() call. 
(#6990)" (#7122) --- .../hadoop/ozone/client/rpc/RpcClient.java | 2 - .../BasicRootedOzoneClientAdapterImpl.java | 121 +++++++++--------- 2 files changed, 58 insertions(+), 65 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index f01fddf40f7d..35db51b3e4de 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -2164,8 +2164,6 @@ public OzoneFileStatus getOzoneFileStatus(String volumeName, @Override public void createDirectory(String volumeName, String bucketName, String keyName) throws IOException { - verifyVolumeName(volumeName); - verifyBucketName(bucketName); String ownerName = getRealUserInfo().getShortUserName(); OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) .setBucketName(bucketName) diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java index 14c297d9f47f..da278f17fbf0 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java @@ -260,8 +260,11 @@ private void initDefaultFsBucketLayout(OzoneConfiguration conf) } } - OzoneBucket getBucket(OFSPath ofsPath, boolean createIfNotExist)throws IOException { - return getBucket(ofsPath.getVolumeName(), ofsPath.getBucketName(), createIfNotExist); + OzoneBucket getBucket(OFSPath ofsPath, boolean createIfNotExist) + throws IOException { + + return getBucket(ofsPath.getVolumeName(), ofsPath.getBucketName(), + createIfNotExist); } /** @@ -273,7 +276,8 @@ OzoneBucket getBucket(OFSPath ofsPath, boolean createIfNotExist)throws IOExcepti * @throws IOException Exceptions other than OMException with result code * VOLUME_NOT_FOUND or BUCKET_NOT_FOUND. */ - private OzoneBucket getBucket(String volumeStr, String bucketStr, boolean createIfNotExist) throws IOException { + private OzoneBucket getBucket(String volumeStr, String bucketStr, + boolean createIfNotExist) throws IOException { Preconditions.checkNotNull(volumeStr); Preconditions.checkNotNull(bucketStr); @@ -283,7 +287,7 @@ private OzoneBucket getBucket(String volumeStr, String bucketStr, boolean create "getBucket: Invalid argument: given bucket string is empty."); } - OzoneBucket bucket = null; + OzoneBucket bucket; try { bucket = proxy.getBucketDetails(volumeStr, bucketStr); @@ -295,8 +299,44 @@ private OzoneBucket getBucket(String volumeStr, String bucketStr, boolean create OzoneFSUtils.validateBucketLayout(bucket.getName(), resolvedBucketLayout); } catch (OMException ex) { if (createIfNotExist) { - handleVolumeOrBucketCreationOnException(volumeStr, bucketStr, ex); - // Try to get the bucket again + // getBucketDetails can throw VOLUME_NOT_FOUND when the parent volume + // doesn't exist and ACL is enabled; it can only throw BUCKET_NOT_FOUND + // when ACL is disabled. Both exceptions need to be handled. 
+ switch (ex.getResult()) { + case VOLUME_NOT_FOUND: + // Create the volume first when the volume doesn't exist + try { + objectStore.createVolume(volumeStr); + } catch (OMException newVolEx) { + // Ignore the case where another client created the volume + if (!newVolEx.getResult().equals(VOLUME_ALREADY_EXISTS)) { + throw newVolEx; + } + } + // No break here. Proceed to create the bucket + case BUCKET_NOT_FOUND: + // When BUCKET_NOT_FOUND is thrown, we expect the parent volume + // exists, so that we don't call create volume and incur + // unnecessary ACL checks which could lead to unwanted behavior. + OzoneVolume volume = proxy.getVolumeDetails(volumeStr); + // Create the bucket + try { + // Buckets created by OFS should be in FSO layout + volume.createBucket(bucketStr, + BucketArgs.newBuilder().setBucketLayout( + this.defaultOFSBucketLayout).build()); + } catch (OMException newBucEx) { + // Ignore the case where another client created the bucket + if (!newBucEx.getResult().equals(BUCKET_ALREADY_EXISTS)) { + throw newBucEx; + } + } + break; + default: + // Throw unhandled exception + throw ex; + } + // Try get bucket again bucket = proxy.getBucketDetails(volumeStr, bucketStr); } else { throw ex; @@ -306,41 +346,6 @@ private OzoneBucket getBucket(String volumeStr, String bucketStr, boolean create return bucket; } - private void handleVolumeOrBucketCreationOnException(String volumeStr, String bucketStr, OMException ex) - throws IOException { - // OM can throw VOLUME_NOT_FOUND when the parent volume does not exist, and in this case we may create the volume, - // OM can also throw BUCKET_NOT_FOUND when the parent bucket does not exist, and so we also may create the bucket. - // This method creates the volume and the bucket when an exception marks that they don't exist. - switch (ex.getResult()) { - case VOLUME_NOT_FOUND: - // Create the volume first when the volume doesn't exist - try { - objectStore.createVolume(volumeStr); - } catch (OMException newVolEx) { - // Ignore the case where another client created the volume - if (!newVolEx.getResult().equals(VOLUME_ALREADY_EXISTS)) { - throw newVolEx; - } - } - // No break here. Proceed to create the bucket - case BUCKET_NOT_FOUND: - // Create the bucket - try { - // Buckets created by OFS should be in FSO layout - BucketArgs defaultBucketArgs = BucketArgs.newBuilder().setBucketLayout(this.defaultOFSBucketLayout).build(); - proxy.createBucket(volumeStr, bucketStr, defaultBucketArgs); - } catch (OMException newBucEx) { - // Ignore the case where another client created the bucket - if (!newBucEx.getResult().equals(BUCKET_ALREADY_EXISTS)) { - throw newBucEx; - } - } - break; - default: - throw ex; - } - } - /** * This API returns the value what is configured at client side only. It could * differ from the server side default values. If no replication config @@ -510,40 +515,30 @@ public boolean createDirectory(String pathStr) throws IOException { LOG.trace("creating dir for path: {}", pathStr); incrementCounter(Statistic.OBJECTS_CREATED, 1); OFSPath ofsPath = new OFSPath(pathStr, config); - - String volumeName = ofsPath.getVolumeName(); - if (volumeName.isEmpty()) { + if (ofsPath.getVolumeName().isEmpty()) { // Volume name unspecified, invalid param, return failure return false; } - - String bucketName = ofsPath.getBucketName(); - if (bucketName.isEmpty()) { - // Create volume only as path only contains one element the volume. 
- objectStore.createVolume(volumeName); + if (ofsPath.getBucketName().isEmpty()) { + // Create volume only + objectStore.createVolume(ofsPath.getVolumeName()); return true; } - String keyStr = ofsPath.getKeyName(); try { - if (keyStr == null || keyStr.isEmpty()) { - // This is the case when the given path only contains volume and bucket. - // If the bucket does not exist, then this will throw and bucket will be created - // in handleVolumeOrBucketCreationOnException later. - proxy.getBucketDetails(volumeName, bucketName); - } else { - proxy.createDirectory(volumeName, bucketName, keyStr); + OzoneBucket bucket = getBucket(ofsPath, true); + // Empty keyStr here indicates only volume and bucket is + // given in pathStr, so getBucket above should handle the creation + // of volume and bucket. We won't feed empty keyStr to + // bucket.createDirectory as that would be a NPE. + if (keyStr != null && keyStr.length() > 0) { + bucket.createDirectory(keyStr); } } catch (OMException e) { if (e.getResult() == OMException.ResultCodes.FILE_ALREADY_EXISTS) { throw new FileAlreadyExistsException(e.getMessage()); } - // Create volume and bucket if they do not exist, and retry key creation. - // This call will throw an exception if it fails, or the exception is different than it handles. - handleVolumeOrBucketCreationOnException(volumeName, bucketName, e); - if (keyStr != null && !keyStr.isEmpty()) { - proxy.createDirectory(volumeName, bucketName, keyStr); - } + throw e; } return true; } From dab15385dc8cbc2a5ed65273c620bf12e0f14191 Mon Sep 17 00:00:00 2001 From: Galsza <109229906+Galsza@users.noreply.github.com> Date: Wed, 28 Aug 2024 01:59:41 +0200 Subject: [PATCH 013/106] HDDS-11216. Replace HAUtils#buildCAX509List usages with other direct usages (#6981) --- .../hdds/scm/client/ClientTrustManager.java | 0 .../certificate/client/CertificateClient.java | 40 +++--- .../statemachine/DatanodeStateMachine.java | 3 +- .../ECContainerOperationClient.java | 20 ++- .../client/DefaultCertificateClient.java | 50 ++------ .../org/apache/hadoop/hdds/utils/HAUtils.java | 116 ++++-------------- .../client/CertificateClientTestImpl.java | 22 ++-- .../scm/server/StorageContainerManager.java | 3 +- .../scm/cli/ContainerOperationClient.java | 2 +- .../ozone/freon/DNRPCLoadGenerator.java | 10 +- 10 files changed, 94 insertions(+), 172 deletions(-) rename hadoop-hdds/{client => common}/src/main/java/org/apache/hadoop/hdds/scm/client/ClientTrustManager.java (100%) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ClientTrustManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ClientTrustManager.java similarity index 100% rename from hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ClientTrustManager.java rename to hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ClientTrustManager.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java index 79db6985e76f..e74bb1f621a1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.security.x509.certificate.client; +import org.apache.hadoop.hdds.scm.client.ClientTrustManager; 
import org.apache.hadoop.hdds.security.exception.OzoneSecurityException; import org.apache.hadoop.hdds.security.ssl.ReloadingX509KeyManager; import org.apache.hadoop.hdds.security.ssl.ReloadingX509TrustManager; @@ -128,23 +129,6 @@ X509Certificate getCertificate(String certSerialId) */ Set getAllCaCerts(); - /** - * Return the pem encoded CA certificate list. - *
- * If initialized return list of pem encoded CA certificates, else return - * null. - * - * @return list of pem encoded CA certificates. - */ - List getCAList(); - - /** - * Update and returns the pem encoded CA certificate list. - * @return list of pem encoded CA certificates. - * @throws IOException - */ - List updateCAList() throws IOException; - /** * Verifies a digital Signature, given the signature and the certificate of * the signer. @@ -176,10 +160,32 @@ default void assertValidKeysAndCertificate() throws OzoneSecurityException { } } + /** + * Gets a KeyManager containing this CertificateClient's key material and trustchain. + * During certificate rotation this KeyManager is automatically updated with the new keys/certificates. + * + * @return A KeyManager containing keys and the trustchain for this CertificateClient. + * @throws CertificateException + */ ReloadingX509KeyManager getKeyManager() throws CertificateException; + /** + * Gets a TrustManager containing the trusted certificates of this CertificateClient. + * During certificate rotation this TrustManager is automatically updated with the new certificates. + * + * @return A TrustManager containing trusted certificates for this CertificateClient. + * @throws CertificateException + */ ReloadingX509TrustManager getTrustManager() throws CertificateException; + /** + * Creates a ClientTrustManager instance using the trusted certificates of this certificate client. + * + * @return The new ClientTrustManager instance. + * @throws IOException + */ + ClientTrustManager createClientTrustManager() throws IOException; + /** * Register a receiver that will be called after the certificate renewed. * diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java index a460e30ede39..71a9e5bca9f7 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto; +import org.apache.hadoop.hdds.scm.client.ClientTrustManager; import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager; @@ -216,7 +217,7 @@ public DatanodeStateMachine(HddsDatanodeService hddsDatanodeService, ReplicationSupervisorMetrics.create(supervisor); ecReconstructionMetrics = ECReconstructionMetrics.create(); - + ClientTrustManager clientTrustManager = null; ecReconstructionCoordinator = new ECReconstructionCoordinator( conf, certClient, secretKeyClient, context, ecReconstructionMetrics, threadNamePrefix); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java index 9dedd65565f5..487e6d37b282 
100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java @@ -26,12 +26,10 @@ import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.client.ClientTrustManager; -import org.apache.hadoop.hdds.security.x509.certificate.client.CACertificateProvider; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.utils.HAUtils; import org.apache.hadoop.ozone.OzoneSecurityUtil; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.security.token.Token; @@ -69,21 +67,17 @@ public ECContainerOperationClient(ConfigurationSource conf, } @Nonnull - private static XceiverClientManager createClientManager( - ConfigurationSource conf, CertificateClient certificateClient) + private static XceiverClientManager createClientManager(ConfigurationSource conf, CertificateClient certificateClient) throws IOException { ClientTrustManager trustManager = null; if (OzoneSecurityUtil.isSecurityEnabled(conf)) { - CACertificateProvider localCaCerts = - () -> HAUtils.buildCAX509List(certificateClient, conf); - CACertificateProvider remoteCacerts = - () -> HAUtils.buildCAX509List(null, conf); - trustManager = new ClientTrustManager(remoteCacerts, localCaCerts); + trustManager = certificateClient.createClientTrustManager(); } - return new XceiverClientManager(conf, - new XceiverClientManager.XceiverClientManagerConfigBuilder() - .setMaxCacheSize(256).setStaleThresholdMs(10 * 1000).build(), - trustManager); + XceiverClientManager.ScmClientConfig scmClientConfig = new XceiverClientManager.XceiverClientManagerConfigBuilder() + .setMaxCacheSize(256) + .setStaleThresholdMs(10 * 1000) + .build(); + return new XceiverClientManager(conf, scmClientConfig, trustManager); } public BlockData[] listBlock(long containerId, DatanodeDetails dn, diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java index 70a475982bd4..b277a759cb8d 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java @@ -73,6 +73,7 @@ import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto; import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdds.scm.client.ClientTrustManager; import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.ssl.ReloadingX509KeyManager; import org.apache.hadoop.hdds.security.ssl.ReloadingX509TrustManager; @@ -983,43 +984,6 @@ public Set getAllCaCerts() { return certs; } - @Override - public List getCAList() { - pemEncodedCACertsLock.lock(); - try { - return pemEncodedCACerts; - } finally { - 
pemEncodedCACertsLock.unlock(); - } - } - - public List listCA() throws IOException { - pemEncodedCACertsLock.lock(); - try { - if (pemEncodedCACerts == null) { - updateCAList(); - } - return pemEncodedCACerts; - } finally { - pemEncodedCACertsLock.unlock(); - } - } - - @Override - public List updateCAList() throws IOException { - pemEncodedCACertsLock.lock(); - try { - pemEncodedCACerts = getScmSecureClient().listCACertificate(); - return pemEncodedCACerts; - } catch (Exception e) { - getLogger().error("Error during updating CA list", e); - throw new CertificateException("Error during updating CA list", e, - CERTIFICATE_ERROR); - } finally { - pemEncodedCACertsLock.unlock(); - } - } - @Override public ReloadingX509TrustManager getTrustManager() throws CertificateException { try { @@ -1049,8 +1013,20 @@ public ReloadingX509KeyManager getKeyManager() throws CertificateException { } } + @Override + public ClientTrustManager createClientTrustManager() throws IOException { + CACertificateProvider caCertificateProvider = () -> { + List caCerts = new ArrayList<>(); + caCerts.addAll(getAllCaCerts()); + caCerts.addAll(getAllRootCaCerts()); + return caCerts; + }; + return new ClientTrustManager(caCertificateProvider, caCertificateProvider); + } + /** * Register a receiver that will be called after the certificate renewed. + * * @param receiver */ @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java index 342a0400cbd6..0dc244bdbc79 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java @@ -35,8 +35,6 @@ import org.apache.hadoop.hdds.scm.proxy.SCMClientConfig; import org.apache.hadoop.hdds.scm.proxy.SCMContainerLocationFailoverProxyProvider; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.hdds.utils.db.DBDefinition; import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; @@ -373,80 +371,6 @@ public static List getExistingSstFiles(File db) throws IOException { return sstList; } - /** - * Build CA list which need to be passed to client. - * - * If certificate client is null, obtain the list of CA using SCM security - * client, else it uses certificate client. - * @return list of CA - */ - public static List buildCAList(CertificateClient certClient, - ConfigurationSource configuration) throws IOException { - long waitDuration = - configuration.getTimeDuration(OZONE_SCM_CA_LIST_RETRY_INTERVAL, - OZONE_SCM_CA_LIST_RETRY_INTERVAL_DEFAULT, TimeUnit.SECONDS); - if (certClient != null) { - if (!SCMHAUtils.isSCMHAEnabled(configuration)) { - return generateCAList(certClient); - } else { - Collection scmNodes = SCMHAUtils.getSCMNodeIds(configuration); - int expectedCount = scmNodes.size() + 1; - if (scmNodes.size() > 1) { - // First check if cert client has ca list initialized. - // This is being done, when this method is called multiple times we - // don't make call to SCM, we return from in-memory. 
- List caCertPemList = certClient.getCAList(); - if (caCertPemList != null && caCertPemList.size() == expectedCount) { - return caCertPemList; - } - return getCAListWithRetry(() -> - waitForCACerts(certClient::updateCAList, expectedCount), - waitDuration); - } else { - return generateCAList(certClient); - } - } - } else { - SCMSecurityProtocolClientSideTranslatorPB scmSecurityProtocolClient = - HddsServerUtil.getScmSecurityClient(configuration); - if (!SCMHAUtils.isSCMHAEnabled(configuration)) { - List caCertPemList = new ArrayList<>(); - SCMGetCertResponseProto scmGetCertResponseProto = - scmSecurityProtocolClient.getCACert(); - if (scmGetCertResponseProto.hasX509Certificate()) { - caCertPemList.add(scmGetCertResponseProto.getX509Certificate()); - } - if (scmGetCertResponseProto.hasX509RootCACertificate()) { - caCertPemList.add(scmGetCertResponseProto.getX509RootCACertificate()); - } - return caCertPemList; - } else { - Collection scmNodes = SCMHAUtils.getSCMNodeIds(configuration); - int expectedCount = scmNodes.size() + 1; - if (scmNodes.size() > 1) { - return getCAListWithRetry(() -> waitForCACerts( - scmSecurityProtocolClient::listCACertificate, - expectedCount), waitDuration); - } else { - return scmSecurityProtocolClient.listCACertificate(); - } - } - } - } - - private static List generateCAList(CertificateClient certClient) - throws IOException { - List caCertPemList = new ArrayList<>(); - for (X509Certificate cert : certClient.getAllRootCaCerts()) { - caCertPemList.add(CertificateCodec.getPEMEncodedString(cert)); - } - for (X509Certificate cert : certClient.getAllCaCerts()) { - caCertPemList.add(CertificateCodec.getPEMEncodedString(cert)); - } - return caCertPemList; - } - - /** * Retry forever until CA list matches expected count. * @param task - task to get CA list. @@ -488,23 +412,37 @@ private static List waitForCACerts( * Build CA List in the format of X509Certificate. * If certificate client is null, obtain the list of CA using SCM * security client, else it uses certificate client. + * * @return list of CA X509Certificates. */ - public static List buildCAX509List( - CertificateClient certClient, - ConfigurationSource conf) throws IOException { - if (certClient != null) { - // Do this here to avoid extra conversion of X509 to pem and again to - // X509 by buildCAList. 
- if (!SCMHAUtils.isSCMHAEnabled(conf)) { - List x509Certificates = new ArrayList<>(); - x509Certificates.addAll(certClient.getAllCaCerts()); - x509Certificates.addAll(certClient.getAllRootCaCerts()); - return x509Certificates; + public static List buildCAX509List(ConfigurationSource conf) throws IOException { + long waitDuration = + conf.getTimeDuration(OZONE_SCM_CA_LIST_RETRY_INTERVAL, + OZONE_SCM_CA_LIST_RETRY_INTERVAL_DEFAULT, TimeUnit.SECONDS); + Collection scmNodes = SCMHAUtils.getSCMNodeIds(conf); + SCMSecurityProtocolClientSideTranslatorPB scmSecurityProtocolClient = + HddsServerUtil.getScmSecurityClient(conf); + if (!SCMHAUtils.isSCMHAEnabled(conf)) { + List caCertPemList = new ArrayList<>(); + SCMGetCertResponseProto scmGetCertResponseProto = + scmSecurityProtocolClient.getCACert(); + if (scmGetCertResponseProto.hasX509Certificate()) { + caCertPemList.add(scmGetCertResponseProto.getX509Certificate()); + } + if (scmGetCertResponseProto.hasX509RootCACertificate()) { + caCertPemList.add(scmGetCertResponseProto.getX509RootCACertificate()); + } + return OzoneSecurityUtil.convertToX509(caCertPemList); + } else { + int expectedCount = scmNodes.size() + 1; + if (scmNodes.size() > 1) { + return OzoneSecurityUtil.convertToX509(getCAListWithRetry(() -> waitForCACerts( + scmSecurityProtocolClient::listCACertificate, + expectedCount), waitDuration)); + } else { + return OzoneSecurityUtil.convertToX509(scmSecurityProtocolClient.listCACertificate()); } } - List pemEncodedCerts = HAUtils.buildCAList(certClient, conf); - return OzoneSecurityUtil.convertToX509(pemEncodedCerts); } } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClientTestImpl.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClientTestImpl.java index fa784b755384..94ef86650c48 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClientTestImpl.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClientTestImpl.java @@ -48,6 +48,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.client.ClientTrustManager; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.ssl.ReloadingX509KeyManager; @@ -257,16 +258,6 @@ public Set getAllCaCerts() { return rootCerts; } - @Override - public List getCAList() { - return null; - } - - @Override - public List updateCAList() throws IOException { - return null; - } - public void renewRootCA() throws Exception { LocalDateTime start = LocalDateTime.now(); Duration rootCACertDuration = securityConfig.getMaxCertificateDuration(); @@ -364,6 +355,17 @@ public ReloadingX509TrustManager getTrustManager() throws CertificateException { } } + @Override + public ClientTrustManager createClientTrustManager() throws IOException { + CACertificateProvider caCertificateProvider = () -> { + List caCerts = new ArrayList<>(); + caCerts.addAll(getAllCaCerts()); + caCerts.addAll(getAllRootCaCerts()); + return caCerts; + }; + return new ClientTrustManager(caCertificateProvider, caCertificateProvider); + } + @Override public void registerNotificationReceiver(CertificateNotification receiver) { synchronized (notificationReceivers) { diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index aaf6bbfc9c42..868e54f19351 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -1613,8 +1613,7 @@ private void persistSCMCertificates() throws IOException { if (primaryScmNodeId != null && !primaryScmNodeId.equals( scmStorageConfig.getScmId())) { List pemEncodedCerts = - scmCertificateClient.listCA(); - + getScmSecurityClientWithMaxRetry(configuration, getCurrentUser()).listCACertificate(); // Write the primary SCM CA and Root CA during startup. for (String cert : pemEncodedCerts) { X509Certificate x509Certificate = CertificateCodec.getX509Certificate( diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index ba556bf24e98..76334d124ea5 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -116,7 +116,7 @@ private XceiverClientManager newXCeiverClientManager(ConfigurationSource conf) throws IOException { XceiverClientManager manager; if (OzoneSecurityUtil.isSecurityEnabled(conf)) { - CACertificateProvider caCerts = () -> HAUtils.buildCAX509List(null, conf); + CACertificateProvider caCerts = () -> HAUtils.buildCAX509List(conf); manager = new XceiverClientManager(conf, conf.getObject(XceiverClientManager.ScmClientConfig.class), new ClientTrustManager(caCerts, null)); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java index f83b2a1a4a9b..a7527952ca35 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java @@ -33,8 +33,8 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.hdds.security.x509.certificate.client.CACertificateProvider; -import org.apache.hadoop.hdds.utils.HAUtils; import org.apache.hadoop.ozone.OzoneSecurityUtil; +import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; import org.apache.hadoop.ozone.util.PayloadUtils; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.slf4j.Logger; @@ -150,11 +150,14 @@ public Void call() throws Exception { } encodedContainerToken = scmClient.getEncodedContainerToken(containerID); XceiverClientFactory xceiverClientManager; + OzoneManagerProtocolClientSideTranslatorPB omClient; if (OzoneSecurityUtil.isSecurityEnabled(configuration)) { - CACertificateProvider caCerts = () -> HAUtils.buildCAX509List(null, configuration); + omClient = createOmClient(configuration, null); + CACertificateProvider caCerts = () -> omClient.getServiceInfo().provideCACerts(); xceiverClientManager = new XceiverClientCreator(configuration, new ClientTrustManager(caCerts, null)); } else { + omClient = null; xceiverClientManager = new XceiverClientCreator(configuration); } clients = new 
ArrayList<>(numClients); @@ -169,6 +172,9 @@ public Void call() throws Exception { try { runTests(this::sendRPCReq); } finally { + if (omClient != null) { + omClient.close(); + } for (XceiverClientSpi client : clients) { xceiverClientManager.releaseClient(client, false); } From 830629009de652e27afc442207db1b62b5dde557 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Wed, 28 Aug 2024 09:21:28 +0200 Subject: [PATCH 014/106] HDDS-11373. Log for EC reconstruction command lists the missing indexes as ASCII control characters (#7123) --- .../commands/ReconstructECContainersCommand.java | 4 ++-- .../TestReconstructionECContainersCommands.java | 13 ++++++++----- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconstructECContainersCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconstructECContainersCommand.java index f6633cb9d370..ada80c980f64 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconstructECContainersCommand.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReconstructECContainersCommand.java @@ -17,13 +17,13 @@ */ package org.apache.hadoop.ozone.protocol.commands; +import java.util.Arrays; import java.util.List; import java.util.Objects; import java.util.stream.Collectors; import com.google.protobuf.ByteString; import org.apache.hadoop.hdds.HddsIdFactory; -import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; @@ -140,7 +140,7 @@ public String toString() { .collect(Collectors.joining(", "))).append("]") .append(", targets: ").append(getTargetDatanodes()) .append(", missingIndexes: ").append( - StringUtils.bytes2String(missingContainerIndexes.asReadOnlyByteBuffer())); + Arrays.toString(missingContainerIndexes.toByteArray())); return sb.toString(); } /** diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/protocol/commands/TestReconstructionECContainersCommands.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/protocol/commands/TestReconstructionECContainersCommands.java index f4e4ec6a2535..519a24a2a5cf 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/protocol/commands/TestReconstructionECContainersCommands.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/protocol/commands/TestReconstructionECContainersCommands.java @@ -26,10 +26,12 @@ import org.junit.jupiter.api.Test; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.stream.Collectors; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -53,11 +55,8 @@ public void testExceptionIfSourceAndMissingNotSameLength() { @Test public void protobufConversion() { - final ByteString missingContainerIndexes = Proto2Utils.unsafeByteString(new byte[]{1, 2}); - List srcNodesIndexes = new ArrayList<>(); - for (int i = 0; i < srcNodesIndexes.size(); i++) { - srcNodesIndexes.add(i + 1L); - } + byte[] missingIndexes = {1, 2}; + final ByteString 
missingContainerIndexes = Proto2Utils.unsafeByteString(missingIndexes); ECReplicationConfig ecReplicationConfig = new ECReplicationConfig(3, 2); final List dnDetails = getDNDetails(5); @@ -70,6 +69,10 @@ public void protobufConversion() { ReconstructECContainersCommand reconstructECContainersCommand = new ReconstructECContainersCommand(1L, sources, targets, missingContainerIndexes, ecReplicationConfig); + + assertThat(reconstructECContainersCommand.toString()) + .contains("missingIndexes: " + Arrays.toString(missingIndexes)); + StorageContainerDatanodeProtocolProtos.ReconstructECContainersCommandProto proto = reconstructECContainersCommand.getProto(); From 3bd237d1bdd07bca47bbdb4749717a4f35753991 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Wed, 28 Aug 2024 03:47:29 -0700 Subject: [PATCH 015/106] HDDS-11325. (addendum) Intermittent failure in TestBlockOutputStreamWithFailures#testContainerClose (#7121) --- .../client/rpc/TestBlockOutputStream.java | 2 -- .../rpc/TestBlockOutputStreamWithFailures.java | 18 +++++++++++------- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java index 8810bab51901..4c978683de1e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java @@ -48,7 +48,6 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.container.TestHelper; -import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.TestInstance; @@ -662,7 +661,6 @@ void testWriteExactlyMaxFlushSize(boolean flushDelay, boolean enablePiggybacking @ParameterizedTest @MethodSource("clientParameters") - @Flaky("HDDS-11325") void testWriteMoreThanMaxFlushSize(boolean flushDelay, boolean enablePiggybacking) throws Exception { OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java index f823add57bdc..5e5461634c0e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java @@ -93,7 +93,6 @@ private static Stream clientParameters() { @ParameterizedTest @MethodSource("clientParameters") - @Flaky("HDDS-11325") void testContainerClose(boolean flushDelay, boolean enablePiggybacking) throws Exception { OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { @@ -386,7 +385,8 @@ private void testWriteMoreThanMaxFlushSize(OzoneClient client) assertInstanceOf(RatisBlockOutputStream.class, keyOutputStream.getStreamEntries().get(0).getOutputStream()); - assertEquals(4, blockOutputStream.getBufferPool().getSize()); + 
assertThat(blockOutputStream.getBufferPool().getSize()) + .isLessThanOrEqualTo(4); assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); assertEquals(400, blockOutputStream.getTotalDataFlushedLength()); @@ -442,7 +442,8 @@ private void testExceptionDuringClose(OzoneClient client) throws Exception { assertInstanceOf(RatisBlockOutputStream.class, keyOutputStream.getStreamEntries().get(0).getOutputStream()); - assertEquals(2, blockOutputStream.getBufferPool().getSize()); + assertThat(blockOutputStream.getBufferPool().getSize()) + .isLessThanOrEqualTo(2); assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); assertEquals(0, blockOutputStream.getTotalDataFlushedLength()); @@ -455,7 +456,8 @@ private void testExceptionDuringClose(OzoneClient client) throws Exception { // Since the data in the buffer is already flushed, flush here will have // no impact on the counters and data structures - assertEquals(2, blockOutputStream.getBufferPool().getSize()); + assertThat(blockOutputStream.getBufferPool().getSize()) + .isLessThanOrEqualTo(2); assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); assertEquals(dataLength, blockOutputStream.getTotalDataFlushedLength()); @@ -506,9 +508,10 @@ private void testWatchForCommitWithSingleNodeRatis(OzoneClient client) keyOutputStream.getStreamEntries().get(0).getOutputStream()); // we have just written data more than flush Size(2 chunks), at this time - // buffer pool will have 4 buffers allocated worth of chunk size + // buffer pool will have up to 4 buffers allocated worth of chunk size - assertEquals(4, blockOutputStream.getBufferPool().getSize()); + assertThat(blockOutputStream.getBufferPool().getSize()) + .isLessThanOrEqualTo(4); // writtenDataLength as well flushedDataLength will be updated here assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); @@ -531,7 +534,8 @@ private void testWatchForCommitWithSingleNodeRatis(OzoneClient client) // Since the data in the buffer is already flushed, flush here will have // no impact on the counters and data structures - assertEquals(4, blockOutputStream.getBufferPool().getSize()); + assertThat(blockOutputStream.getBufferPool().getSize()) + .isLessThanOrEqualTo(4); assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); assertEquals(dataLength, blockOutputStream.getTotalDataFlushedLength()); From 0bd8ba1e6c17bfc4dfb3a0d1165bb1707455731c Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Wed, 28 Aug 2024 14:10:44 +0200 Subject: [PATCH 016/106] HDDS-11372. 
No coverage for org.apache.ozone packages (#7124) --- hadoop-ozone/dist/src/main/compose/test-all.sh | 2 +- pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/dist/src/main/compose/test-all.sh b/hadoop-ozone/dist/src/main/compose/test-all.sh index cb76257cd8d2..863e1d0b75a2 100755 --- a/hadoop-ozone/dist/src/main/compose/test-all.sh +++ b/hadoop-ozone/dist/src/main/compose/test-all.sh @@ -33,7 +33,7 @@ source "$SCRIPT_DIR"/testlib.sh if [[ "${OZONE_WITH_COVERAGE}" == "true" ]]; then java -cp "$PROJECT_DIR"/share/coverage/$(ls "$PROJECT_DIR"/share/coverage | grep test-util):"$PROJECT_DIR"/share/coverage/jacoco-core.jar org.apache.ozone.test.JacocoServer & DOCKER_BRIDGE_IP=$(docker network inspect bridge --format='{{(index .IPAM.Config 0).Gateway}}') - export OZONE_OPTS="-javaagent:share/coverage/jacoco-agent.jar=output=tcpclient,address=$DOCKER_BRIDGE_IP,includes=org.apache.hadoop.ozone.*:org.apache.hadoop.hdds.*:org.apache.hadoop.fs.ozone.*" + export OZONE_OPTS="-javaagent:share/coverage/jacoco-agent.jar=output=tcpclient,address=$DOCKER_BRIDGE_IP,includes=org.apache.hadoop.ozone.*:org.apache.hadoop.hdds.*:org.apache.hadoop.fs.ozone.*:org.apache.ozone.*:org.hadoop.ozone.*" fi cd "$SCRIPT_DIR" diff --git a/pom.xml b/pom.xml index 264cd993b804..e8872fd71300 100644 --- a/pom.xml +++ b/pom.xml @@ -1751,7 +1751,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs prepare-agent - org.apache.hadoop.hdds.*,org.apache.hadoop.ozone.*,org.apache.hadoop.fs.ozone.* + org.apache.hadoop.hdds.*,org.apache.hadoop.ozone.*,org.apache.hadoop.fs.ozone.*,org.apache.ozone.*,org.hadoop.ozone.* From 41d81479e41ad651b12fe4f9bb27500326f376ef Mon Sep 17 00:00:00 2001 From: Abhishek Pal <43001336+devabhishekpal@users.noreply.github.com> Date: Wed, 28 Aug 2024 20:19:05 +0530 Subject: [PATCH 017/106] HDDS-11368. 
Remove dependency on Babel in Vite (#7119) --- .../recon/ozone-recon-web/package.json | 3 +- .../recon/ozone-recon-web/pnpm-lock.yaml | 533 ++++-------------- .../recon/ozone-recon-web/vite.config.ts | 9 +- 3 files changed, 131 insertions(+), 414 deletions(-) diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json index d931a0ed79b0..ab2b9e3fbe9c 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json @@ -64,7 +64,7 @@ "@types/react-select": "^3.0.13", "@typescript-eslint/eslint-plugin": "^5.30.0", "@typescript-eslint/parser": "^5.30.0", - "@vitejs/plugin-react": "^4.0.0", + "@vitejs/plugin-react-swc": "^3.5.0", "eslint": "^7.28.0", "eslint-config-prettier": "^8.10.0", "eslint-plugin-prettier": "^3.4.1", @@ -74,7 +74,6 @@ "npm-run-all": "^4.1.5", "prettier": "^2.8.4", "vite": "4.5.3", - "vite-plugin-svgr": "^4.2.0", "vite-tsconfig-paths": "^3.6.0", "vitest": "^1.6.0" }, diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml index d1b8844ac621..a56b0d07c22d 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml @@ -85,9 +85,9 @@ devDependencies: '@typescript-eslint/parser': specifier: ^5.30.0 version: 5.62.0(eslint@7.32.0)(typescript@4.9.5) - '@vitejs/plugin-react': - specifier: ^4.0.0 - version: 4.3.1(vite@4.5.3) + '@vitejs/plugin-react-swc': + specifier: ^3.5.0 + version: 3.7.0(vite@4.5.3) eslint: specifier: ^7.28.0 version: 7.32.0 @@ -115,9 +115,6 @@ devDependencies: vite: specifier: 4.5.3 version: 4.5.3(less@3.13.1) - vite-plugin-svgr: - specifier: ^4.2.0 - version: 4.2.0(typescript@4.9.5)(vite@4.5.3) vite-tsconfig-paths: specifier: ^3.6.0 version: 3.6.0(vite@4.5.3) @@ -131,14 +128,6 @@ packages: resolution: {integrity: sha512-Ff9+ksdQQB3rMncgqDK78uLznstjyfIf2Arnh22pW8kBpLs6rpKDwgnZT46hin5Hl1WzazzK64DOrhSwYpS7bQ==} dev: true - /@ampproject/remapping@2.3.0: - resolution: {integrity: sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==} - engines: {node: '>=6.0.0'} - dependencies: - '@jridgewell/gen-mapping': 0.3.5 - '@jridgewell/trace-mapping': 0.3.25 - dev: true - /@ant-design/colors@5.1.1: resolution: {integrity: sha512-Txy4KpHrp3q4XZdfgOBqLl+lkQIc3tEvHXOimRN1giX1AEC7mGtyrO9p8iRGJ3FLuVMGa2gNEzQyghVymLttKQ==} dependencies: @@ -198,34 +187,6 @@ packages: '@babel/highlight': 7.24.7 picocolors: 1.0.1 - /@babel/compat-data@7.24.7: - resolution: {integrity: sha512-qJzAIcv03PyaWqxRgO4mSU3lihncDT296vnyuE2O8uA4w3UHWI4S3hgeZd1L8W1Bft40w9JxJ2b412iDUFFRhw==} - engines: {node: '>=6.9.0'} - dev: true - - /@babel/core@7.24.7: - resolution: {integrity: sha512-nykK+LEK86ahTkX/3TgauT0ikKoNCfKHEaZYTUVupJdTLzGNvrblu4u6fa7DhZONAltdf8e662t/abY8idrd/g==} - engines: {node: '>=6.9.0'} - dependencies: - '@ampproject/remapping': 2.3.0 - '@babel/code-frame': 7.24.7 - '@babel/generator': 7.24.7 - '@babel/helper-compilation-targets': 7.24.7 - '@babel/helper-module-transforms': 7.24.7(@babel/core@7.24.7) - '@babel/helpers': 7.24.7 - '@babel/parser': 7.24.7 - '@babel/template': 7.24.7 - '@babel/traverse': 7.24.7 - '@babel/types': 7.24.7 - convert-source-map: 2.0.0 - debug: 4.3.5 - 
gensync: 1.0.0-beta.2 - json5: 2.2.3 - semver: 6.3.1 - transitivePeerDependencies: - - supports-color - dev: true - /@babel/generator@7.24.7: resolution: {integrity: sha512-oipXieGC3i45Y1A41t4tAqpnEZWgB/lC6Ehh6+rOviR5XWpTtMmLN+fGjz9vOiNRt0p6RtO6DtD0pdU3vpqdSA==} engines: {node: '>=6.9.0'} @@ -234,23 +195,14 @@ packages: '@jridgewell/gen-mapping': 0.3.5 '@jridgewell/trace-mapping': 0.3.25 jsesc: 2.5.2 - - /@babel/helper-compilation-targets@7.24.7: - resolution: {integrity: sha512-ctSdRHBi20qWOfy27RUb4Fhp07KSJ3sXcuSvTrXrc4aG8NSYDo1ici3Vhg9bg69y5bj0Mr1lh0aeEgTvc12rMg==} - engines: {node: '>=6.9.0'} - dependencies: - '@babel/compat-data': 7.24.7 - '@babel/helper-validator-option': 7.24.7 - browserslist: 4.23.1 - lru-cache: 5.1.1 - semver: 6.3.1 - dev: true + dev: false /@babel/helper-environment-visitor@7.24.7: resolution: {integrity: sha512-DoiN84+4Gnd0ncbBOM9AZENV4a5ZiL39HYMyZJGZ/AZEykHYdJw0wW3kdcsh9/Kn+BRXHLkkklZ51ecPKmI1CQ==} engines: {node: '>=6.9.0'} dependencies: '@babel/types': 7.24.7 + dev: false /@babel/helper-function-name@7.24.7: resolution: {integrity: sha512-FyoJTsj/PEUWu1/TYRiXTIHc8lbw+TDYkZuoE43opPS5TrI7MyONBE1oNvfguEXAD9yhQRrVBnXdXzSLQl9XnA==} @@ -258,12 +210,14 @@ packages: dependencies: '@babel/template': 7.24.7 '@babel/types': 7.24.7 + dev: false /@babel/helper-hoist-variables@7.24.7: resolution: {integrity: sha512-MJJwhkoGy5c4ehfoRyrJ/owKeMl19U54h27YYftT0o2teQ3FJ3nQUf/I3LlJsX4l3qlw7WRXUmiyajvHXoTubQ==} engines: {node: '>=6.9.0'} dependencies: '@babel/types': 7.24.7 + dev: false /@babel/helper-module-imports@7.24.7: resolution: {integrity: sha512-8AyH3C+74cgCVVXow/myrynrAGv+nTVg5vKu2nZph9x7RcRwzmh0VFallJuFTZ9mx6u4eSdXZfcOzSqTUm0HCA==} @@ -273,65 +227,24 @@ packages: '@babel/types': 7.24.7 transitivePeerDependencies: - supports-color - - /@babel/helper-module-transforms@7.24.7(@babel/core@7.24.7): - resolution: {integrity: sha512-1fuJEwIrp+97rM4RWdO+qrRsZlAeL1lQJoPqtCYWv0NL115XM93hIH4CSRln2w52SqvmY5hqdtauB6QFCDiZNQ==} - engines: {node: '>=6.9.0'} - peerDependencies: - '@babel/core': ^7.0.0 - dependencies: - '@babel/core': 7.24.7 - '@babel/helper-environment-visitor': 7.24.7 - '@babel/helper-module-imports': 7.24.7 - '@babel/helper-simple-access': 7.24.7 - '@babel/helper-split-export-declaration': 7.24.7 - '@babel/helper-validator-identifier': 7.24.7 - transitivePeerDependencies: - - supports-color - dev: true - - /@babel/helper-plugin-utils@7.24.7: - resolution: {integrity: sha512-Rq76wjt7yz9AAc1KnlRKNAi/dMSVWgDRx43FHoJEbcYU6xOWaE2dVPwcdTukJrjxS65GITyfbvEYHvkirZ6uEg==} - engines: {node: '>=6.9.0'} - dev: true - - /@babel/helper-simple-access@7.24.7: - resolution: {integrity: sha512-zBAIvbCMh5Ts+b86r/CjU+4XGYIs+R1j951gxI3KmmxBMhCg4oQMsv6ZXQ64XOm/cvzfU1FmoCyt6+owc5QMYg==} - engines: {node: '>=6.9.0'} - dependencies: - '@babel/traverse': 7.24.7 - '@babel/types': 7.24.7 - transitivePeerDependencies: - - supports-color - dev: true + dev: false /@babel/helper-split-export-declaration@7.24.7: resolution: {integrity: sha512-oy5V7pD+UvfkEATUKvIjvIAH/xCzfsFVw7ygW2SI6NClZzquT+mwdTfgfdbUiceh6iQO0CHtCPsyze/MZ2YbAA==} engines: {node: '>=6.9.0'} dependencies: '@babel/types': 7.24.7 + dev: false /@babel/helper-string-parser@7.24.7: resolution: {integrity: sha512-7MbVt6xrwFQbunH2DNQsAP5sTGxfqQtErvBIvIMi6EQnbgUOuVYanvREcmFrOPhoXBrTtjhhP+lW+o5UfK+tDg==} engines: {node: '>=6.9.0'} + dev: false /@babel/helper-validator-identifier@7.24.7: resolution: {integrity: sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==} engines: {node: '>=6.9.0'} - 
/@babel/helper-validator-option@7.24.7: - resolution: {integrity: sha512-yy1/KvjhV/ZCL+SM7hBrvnZJ3ZuT9OuZgIJAGpPEToANvc3iM6iDvBnRjtElWibHU6n8/LPR/EjX9EtIEYO3pw==} - engines: {node: '>=6.9.0'} - dev: true - - /@babel/helpers@7.24.7: - resolution: {integrity: sha512-NlmJJtvcw72yRJRcnCmGvSi+3jDEg8qFu3z0AFoymmzLx5ERVWyzd9kVXr7Th9/8yIJi2Zc6av4Tqz3wFs8QWg==} - engines: {node: '>=6.9.0'} - dependencies: - '@babel/template': 7.24.7 - '@babel/types': 7.24.7 - dev: true - /@babel/highlight@7.24.7: resolution: {integrity: sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw==} engines: {node: '>=6.9.0'} @@ -347,26 +260,7 @@ packages: hasBin: true dependencies: '@babel/types': 7.24.7 - - /@babel/plugin-transform-react-jsx-self@7.24.7(@babel/core@7.24.7): - resolution: {integrity: sha512-fOPQYbGSgH0HUp4UJO4sMBFjY6DuWq+2i8rixyUMb3CdGixs/gccURvYOAhajBdKDoGajFr3mUq5rH3phtkGzw==} - engines: {node: '>=6.9.0'} - peerDependencies: - '@babel/core': ^7.0.0-0 - dependencies: - '@babel/core': 7.24.7 - '@babel/helper-plugin-utils': 7.24.7 - dev: true - - /@babel/plugin-transform-react-jsx-source@7.24.7(@babel/core@7.24.7): - resolution: {integrity: sha512-J2z+MWzZHVOemyLweMqngXrgGC42jQ//R0KdxqkIz/OrbVIIlhFI3WigZ5fO+nwFvBlncr4MGapd8vTyc7RPNQ==} - engines: {node: '>=6.9.0'} - peerDependencies: - '@babel/core': ^7.0.0-0 - dependencies: - '@babel/core': 7.24.7 - '@babel/helper-plugin-utils': 7.24.7 - dev: true + dev: false /@babel/runtime@7.24.7: resolution: {integrity: sha512-UwgBRMjJP+xv857DCngvqXI3Iq6J4v0wXmwc6sapg+zyhbwmQX67LUEFrkK5tbyJ30jGuG3ZvWpBiB9LCy1kWw==} @@ -381,6 +275,7 @@ packages: '@babel/code-frame': 7.24.7 '@babel/parser': 7.24.7 '@babel/types': 7.24.7 + dev: false /@babel/traverse@7.24.7: resolution: {integrity: sha512-yb65Ed5S/QAcewNPh0nZczy9JdYXkkAbIsEo+P7BE7yO3txAY30Y/oPa3QkQ5It3xVG2kpKMg9MsdxZaO31uKA==} @@ -398,6 +293,7 @@ packages: globals: 11.12.0 transitivePeerDependencies: - supports-color + dev: false /@babel/types@7.24.7: resolution: {integrity: sha512-XEFXSlxiG5td2EJRe8vOmRbaXVgfcBlszKujvVmWIK/UpywWljQCfzAv3RQCGujWQ1RD4YYWEAqDXfuJiy8f5Q==} @@ -406,6 +302,7 @@ packages: '@babel/helper-string-parser': 7.24.7 '@babel/helper-validator-identifier': 7.24.7 to-fast-properties: 2.0.0 + dev: false /@ctrl/tinycolor@3.6.1: resolution: {integrity: sha512-SITSV6aIXsuVNV3f3O0f2n/cgyEDWoSqtZMYiAmcsYHydcKrOz3gUxB/iXd/Qf08+IZX4KpgNbvUdMBmWz+kcA==} @@ -1047,20 +944,6 @@ packages: dev: true optional: true - /@rollup/pluginutils@5.1.0: - resolution: {integrity: sha512-XTIWOPPcpvyKI6L1NHo0lFlCyznUEyPmPY1mc3KpPVDYulHSTvyeLNVW00QTLIAFNhR3kYnJTQHeGqU4M3n09g==} - engines: {node: '>=14.0.0'} - peerDependencies: - rollup: ^1.20.0||^2.0.0||^3.0.0||^4.0.0 - peerDependenciesMeta: - rollup: - optional: true - dependencies: - '@types/estree': 1.0.5 - estree-walker: 2.0.2 - picomatch: 2.3.1 - dev: true - /@rollup/rollup-android-arm-eabi@4.18.0: resolution: {integrity: sha512-Tya6xypR10giZV1XzxmH5wr25VcZSncG0pZIjfePT0OVBvqNEurzValetGNarVrGiq66EBVAFn15iYX4w6FKgQ==} cpu: [arm] @@ -1198,130 +1081,129 @@ packages: engines: {node: '>=6'} dev: true - /@svgr/babel-plugin-add-jsx-attribute@8.0.0(@babel/core@7.24.7): - resolution: {integrity: sha512-b9MIk7yhdS1pMCZM8VeNfUlSKVRhsHZNMl5O9SfaX0l0t5wjdgu4IDzGB8bpnGBBOjGST3rRFVsaaEtI4W6f7g==} - engines: {node: '>=14'} - peerDependencies: - '@babel/core': ^7.0.0-0 - dependencies: - '@babel/core': 7.24.7 + /@swc/core-darwin-arm64@1.7.18: + resolution: {integrity: 
sha512-MwLc5U+VGPMZm8MjlFBjEB2wyT1EK0NNJ3tn+ps9fmxdFP+PL8EpMiY1O1F2t1ydy2OzBtZz81sycjM9RieFBg==} + engines: {node: '>=10'} + cpu: [arm64] + os: [darwin] + requiresBuild: true dev: true + optional: true - /@svgr/babel-plugin-remove-jsx-attribute@8.0.0(@babel/core@7.24.7): - resolution: {integrity: sha512-BcCkm/STipKvbCl6b7QFrMh/vx00vIP63k2eM66MfHJzPr6O2U0jYEViXkHJWqXqQYjdeA9cuCl5KWmlwjDvbA==} - engines: {node: '>=14'} - peerDependencies: - '@babel/core': ^7.0.0-0 - dependencies: - '@babel/core': 7.24.7 + /@swc/core-darwin-x64@1.7.18: + resolution: {integrity: sha512-IkukOQUw7/14VkHp446OkYGCZEHqZg9pTmTdBawlUyz2JwZMSn2VodCl7aFSdGCsU4Cwni8zKA8CCgkCCAELhw==} + engines: {node: '>=10'} + cpu: [x64] + os: [darwin] + requiresBuild: true dev: true + optional: true - /@svgr/babel-plugin-remove-jsx-empty-expression@8.0.0(@babel/core@7.24.7): - resolution: {integrity: sha512-5BcGCBfBxB5+XSDSWnhTThfI9jcO5f0Ai2V24gZpG+wXF14BzwxxdDb4g6trdOux0rhibGs385BeFMSmxtS3uA==} - engines: {node: '>=14'} - peerDependencies: - '@babel/core': ^7.0.0-0 - dependencies: - '@babel/core': 7.24.7 + /@swc/core-linux-arm-gnueabihf@1.7.18: + resolution: {integrity: sha512-ATnb6jJaBeXCqrTUawWdoOy7eP9SCI7UMcfXlYIMxX4otKKspLPAEuGA5RaNxlCcj9ObyO0J3YGbtZ6hhD2pjg==} + engines: {node: '>=10'} + cpu: [arm] + os: [linux] + requiresBuild: true dev: true + optional: true - /@svgr/babel-plugin-replace-jsx-attribute-value@8.0.0(@babel/core@7.24.7): - resolution: {integrity: sha512-KVQ+PtIjb1BuYT3ht8M5KbzWBhdAjjUPdlMtpuw/VjT8coTrItWX6Qafl9+ji831JaJcu6PJNKCV0bp01lBNzQ==} - engines: {node: '>=14'} - peerDependencies: - '@babel/core': ^7.0.0-0 - dependencies: - '@babel/core': 7.24.7 + /@swc/core-linux-arm64-gnu@1.7.18: + resolution: {integrity: sha512-poHtH7zL7lEp9K2inY90lGHJABWxURAOgWNeZqrcR5+jwIe7q5KBisysH09Zf/JNF9+6iNns+U0xgWTNJzBuGA==} + engines: {node: '>=10'} + cpu: [arm64] + os: [linux] + requiresBuild: true dev: true + optional: true - /@svgr/babel-plugin-svg-dynamic-title@8.0.0(@babel/core@7.24.7): - resolution: {integrity: sha512-omNiKqwjNmOQJ2v6ge4SErBbkooV2aAWwaPFs2vUY7p7GhVkzRkJ00kILXQvRhA6miHnNpXv7MRnnSjdRjK8og==} - engines: {node: '>=14'} - peerDependencies: - '@babel/core': ^7.0.0-0 - dependencies: - '@babel/core': 7.24.7 + /@swc/core-linux-arm64-musl@1.7.18: + resolution: {integrity: sha512-qnNI1WmcOV7Wz1ZDyK6WrOlzLvJ01rnni8ec950mMHWkLRMP53QvCvhF3S+7gFplWBwWJTOOPPUqJp/PlSxWyQ==} + engines: {node: '>=10'} + cpu: [arm64] + os: [linux] + requiresBuild: true dev: true + optional: true - /@svgr/babel-plugin-svg-em-dimensions@8.0.0(@babel/core@7.24.7): - resolution: {integrity: sha512-mURHYnu6Iw3UBTbhGwE/vsngtCIbHE43xCRK7kCw4t01xyGqb2Pd+WXekRRoFOBIY29ZoOhUCTEweDMdrjfi9g==} - engines: {node: '>=14'} - peerDependencies: - '@babel/core': ^7.0.0-0 - dependencies: - '@babel/core': 7.24.7 + /@swc/core-linux-x64-gnu@1.7.18: + resolution: {integrity: sha512-x9SCqCLzwtlqtD5At3I1a7Gco+EuXnzrJGoucmkpeQohshHuwa+cskqsXO6u1Dz0jXJEuHbBZB9va1wYYfjgFg==} + engines: {node: '>=10'} + cpu: [x64] + os: [linux] + requiresBuild: true dev: true + optional: true - /@svgr/babel-plugin-transform-react-native-svg@8.1.0(@babel/core@7.24.7): - resolution: {integrity: sha512-Tx8T58CHo+7nwJ+EhUwx3LfdNSG9R2OKfaIXXs5soiy5HtgoAEkDay9LIimLOcG8dJQH1wPZp/cnAv6S9CrR1Q==} - engines: {node: '>=14'} - peerDependencies: - '@babel/core': ^7.0.0-0 - dependencies: - '@babel/core': 7.24.7 + /@swc/core-linux-x64-musl@1.7.18: + resolution: {integrity: sha512-qtj8iOpMMgKjzxTv+islmEY0JBsbd93nka0gzcTTmGZxKtL5jSUsYQvkxwNPZr5M9NU1fgaR3n1vE6lFmtY0IQ==} + engines: {node: '>=10'} + cpu: 
[x64] + os: [linux] + requiresBuild: true dev: true + optional: true - /@svgr/babel-plugin-transform-svg-component@8.0.0(@babel/core@7.24.7): - resolution: {integrity: sha512-DFx8xa3cZXTdb/k3kfPeaixecQLgKh5NVBMwD0AQxOzcZawK4oo1Jh9LbrcACUivsCA7TLG8eeWgrDXjTMhRmw==} - engines: {node: '>=12'} - peerDependencies: - '@babel/core': ^7.0.0-0 - dependencies: - '@babel/core': 7.24.7 + /@swc/core-win32-arm64-msvc@1.7.18: + resolution: {integrity: sha512-ltX/Ol9+Qu4SXmISCeuwVgAjSa8nzHTymknpozzVMgjXUoZMoz6lcynfKL1nCh5XLgqh0XNHUKLti5YFF8LrrA==} + engines: {node: '>=10'} + cpu: [arm64] + os: [win32] + requiresBuild: true dev: true + optional: true - /@svgr/babel-preset@8.1.0(@babel/core@7.24.7): - resolution: {integrity: sha512-7EYDbHE7MxHpv4sxvnVPngw5fuR6pw79SkcrILHJ/iMpuKySNCl5W1qcwPEpU+LgyRXOaAFgH0KhwD18wwg6ug==} - engines: {node: '>=14'} - peerDependencies: - '@babel/core': ^7.0.0-0 - dependencies: - '@babel/core': 7.24.7 - '@svgr/babel-plugin-add-jsx-attribute': 8.0.0(@babel/core@7.24.7) - '@svgr/babel-plugin-remove-jsx-attribute': 8.0.0(@babel/core@7.24.7) - '@svgr/babel-plugin-remove-jsx-empty-expression': 8.0.0(@babel/core@7.24.7) - '@svgr/babel-plugin-replace-jsx-attribute-value': 8.0.0(@babel/core@7.24.7) - '@svgr/babel-plugin-svg-dynamic-title': 8.0.0(@babel/core@7.24.7) - '@svgr/babel-plugin-svg-em-dimensions': 8.0.0(@babel/core@7.24.7) - '@svgr/babel-plugin-transform-react-native-svg': 8.1.0(@babel/core@7.24.7) - '@svgr/babel-plugin-transform-svg-component': 8.0.0(@babel/core@7.24.7) + /@swc/core-win32-ia32-msvc@1.7.18: + resolution: {integrity: sha512-RgTcFP3wgyxnQbTCJrlgBJmgpeTXo8t807GU9GxApAXfpLZJ3swJ2GgFUmIJVdLWyffSHF5BEkF3FmF6mtH5AQ==} + engines: {node: '>=10'} + cpu: [ia32] + os: [win32] + requiresBuild: true dev: true + optional: true - /@svgr/core@8.1.0(typescript@4.9.5): - resolution: {integrity: sha512-8QqtOQT5ACVlmsvKOJNEaWmRPmcojMOzCz4Hs2BGG/toAp/K38LcsMRyLp349glq5AzJbCEeimEoxaX6v/fLrA==} - engines: {node: '>=14'} - dependencies: - '@babel/core': 7.24.7 - '@svgr/babel-preset': 8.1.0(@babel/core@7.24.7) - camelcase: 6.3.0 - cosmiconfig: 8.3.6(typescript@4.9.5) - snake-case: 3.0.4 - transitivePeerDependencies: - - supports-color - - typescript + /@swc/core-win32-x64-msvc@1.7.18: + resolution: {integrity: sha512-XbZ0wAgzR757+DhQcnv60Y/bK9yuWPhDNRQVFFQVRsowvK3+c6EblyfUSytIidpXgyYFzlprq/9A9ZlO/wvDWw==} + engines: {node: '>=10'} + cpu: [x64] + os: [win32] + requiresBuild: true dev: true + optional: true - /@svgr/hast-util-to-babel-ast@8.0.0: - resolution: {integrity: sha512-EbDKwO9GpfWP4jN9sGdYwPBU0kdomaPIL2Eu4YwmgP+sJeXT+L7bMwJUBnhzfH8Q2qMBqZ4fJwpCyYsAN3mt2Q==} - engines: {node: '>=14'} + /@swc/core@1.7.18: + resolution: {integrity: sha512-qL9v5N5S38ijmqiQRvCFUUx2vmxWT/JJ2rswElnyaHkOHuVoAFhBB90Ywj4RKjh3R0zOjhEcemENTyF3q3G6WQ==} + engines: {node: '>=10'} + requiresBuild: true + peerDependencies: + '@swc/helpers': '*' + peerDependenciesMeta: + '@swc/helpers': + optional: true dependencies: - '@babel/types': 7.24.7 - entities: 4.5.0 + '@swc/counter': 0.1.3 + '@swc/types': 0.1.12 + optionalDependencies: + '@swc/core-darwin-arm64': 1.7.18 + '@swc/core-darwin-x64': 1.7.18 + '@swc/core-linux-arm-gnueabihf': 1.7.18 + '@swc/core-linux-arm64-gnu': 1.7.18 + '@swc/core-linux-arm64-musl': 1.7.18 + '@swc/core-linux-x64-gnu': 1.7.18 + '@swc/core-linux-x64-musl': 1.7.18 + '@swc/core-win32-arm64-msvc': 1.7.18 + '@swc/core-win32-ia32-msvc': 1.7.18 + '@swc/core-win32-x64-msvc': 1.7.18 dev: true - /@svgr/plugin-jsx@8.1.0(@svgr/core@8.1.0): - resolution: {integrity: 
sha512-0xiIyBsLlr8quN+WyuxooNW9RJ0Dpr8uOnH/xrCVO8GLUcwHISwj1AG0k+LFzteTkAA0GbX0kj9q6Dk70PTiPA==} - engines: {node: '>=14'} - peerDependencies: - '@svgr/core': '*' + /@swc/counter@0.1.3: + resolution: {integrity: sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==} + dev: true + + /@swc/types@0.1.12: + resolution: {integrity: sha512-wBJA+SdtkbFhHjTMYH+dEH1y4VpfGdAc2Kw/LK09i9bXd/K6j6PkDcFCEzb6iVfZMkPRrl/q0e3toqTAJdkIVA==} dependencies: - '@babel/core': 7.24.7 - '@svgr/babel-preset': 8.1.0(@babel/core@7.24.7) - '@svgr/core': 8.1.0(typescript@4.9.5) - '@svgr/hast-util-to-babel-ast': 8.0.0 - svg-parser: 2.0.4 - transitivePeerDependencies: - - supports-color + '@swc/counter': 0.1.3 dev: true /@szmarczak/http-timer@1.1.2: @@ -1377,35 +1259,6 @@ packages: resolution: {integrity: sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==} dev: true - /@types/babel__core@7.20.5: - resolution: {integrity: sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==} - dependencies: - '@babel/parser': 7.24.7 - '@babel/types': 7.24.7 - '@types/babel__generator': 7.6.8 - '@types/babel__template': 7.4.4 - '@types/babel__traverse': 7.20.6 - dev: true - - /@types/babel__generator@7.6.8: - resolution: {integrity: sha512-ASsj+tpEDsEiFr1arWrlN6V3mdfjRMZt6LtK/Vp/kreFLnr5QH5+DhvD5nINYZXzwJvXeGq+05iUXcAzVrqWtw==} - dependencies: - '@babel/types': 7.24.7 - dev: true - - /@types/babel__template@7.4.4: - resolution: {integrity: sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==} - dependencies: - '@babel/parser': 7.24.7 - '@babel/types': 7.24.7 - dev: true - - /@types/babel__traverse@7.20.6: - resolution: {integrity: sha512-r1bzfrm0tomOI8g1SzvCaQHo6Lcv6zu0EA+W2kHrt8dyrHQxGzBBL4kdkzIS+jBMV+EYcMAEAqXqYaLJq5rOZg==} - dependencies: - '@babel/types': 7.24.7 - dev: true - /@types/cookie@0.4.1: resolution: {integrity: sha512-XW/Aa8APYr6jSVVA1y/DEIZX0/GMKLEVekNG727R8cs56ahETkRAy/3DR7+fJyh7oUgGwNQaRfXCun0+KbWY7Q==} dev: true @@ -1651,20 +1504,15 @@ packages: eslint-visitor-keys: 3.4.3 dev: true - /@vitejs/plugin-react@4.3.1(vite@4.5.3): - resolution: {integrity: sha512-m/V2syj5CuVnaxcUJOQRel/Wr31FFXRFlnOoq1TVtkCxsY5veGMTEmpWHndrhB2U8ScHtCQB1e+4hWYExQc6Lg==} - engines: {node: ^14.18.0 || >=16.0.0} + /@vitejs/plugin-react-swc@3.7.0(vite@4.5.3): + resolution: {integrity: sha512-yrknSb3Dci6svCd/qhHqhFPDSw0QtjumcqdKMoNNzmOl5lMXTTiqzjWtG4Qask2HdvvzaNgSunbQGet8/GrKdA==} peerDependencies: - vite: ^4.2.0 || ^5.0.0 + vite: ^4 || ^5 dependencies: - '@babel/core': 7.24.7 - '@babel/plugin-transform-react-jsx-self': 7.24.7(@babel/core@7.24.7) - '@babel/plugin-transform-react-jsx-source': 7.24.7(@babel/core@7.24.7) - '@types/babel__core': 7.20.5 - react-refresh: 0.14.2 + '@swc/core': 1.7.18 vite: 4.5.3(less@3.13.1) transitivePeerDependencies: - - supports-color + - '@swc/helpers' dev: true /@vitest/expect@1.6.0: @@ -1926,10 +1774,6 @@ packages: sprintf-js: 1.0.3 dev: true - /argparse@2.0.1: - resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} - dev: true - /aria-query@5.1.3: resolution: {integrity: sha512-R5iJ5lkuHybztUfuOAznmboyjWq8O6sqNqtK7CLOqdydi54VNbORp49mb14KbWgG1QD3JFO9hJdZ+y4KutfdOQ==} dependencies: @@ -2146,17 +1990,6 @@ packages: fill-range: 7.1.1 dev: true - /browserslist@4.23.1: - resolution: {integrity: sha512-TUfofFo/KsK/bWZ9TWQ5O26tsWW4Uhmt8IYklbnUa70udB6P2wA7w7o4PY4muaEPBQaAX+CEnmmIA41NVHtPVw==} 
- engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7} - hasBin: true - dependencies: - caniuse-lite: 1.0.30001636 - electron-to-chromium: 1.4.810 - node-releases: 2.0.14 - update-browserslist-db: 1.0.16(browserslist@4.23.1) - dev: true - /buffer@5.7.1: resolution: {integrity: sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==} dependencies: @@ -2212,15 +2045,6 @@ packages: engines: {node: '>=6'} dev: true - /camelcase@6.3.0: - resolution: {integrity: sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==} - engines: {node: '>=10'} - dev: true - - /caniuse-lite@1.0.30001636: - resolution: {integrity: sha512-bMg2vmr8XBsbL6Lr0UHXy/21m84FTxDLWn2FSqMd5PrlbMxwJlQnC2YWYxVgp66PZE+BBNF2jYQUBKCo1FDeZg==} - dev: true - /caseless@0.12.0: resolution: {integrity: sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw==} dev: true @@ -2441,10 +2265,6 @@ packages: resolution: {integrity: sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==} dev: false - /convert-source-map@2.0.0: - resolution: {integrity: sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==} - dev: true - /cookie-signature@1.0.6: resolution: {integrity: sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==} dev: true @@ -2493,22 +2313,6 @@ packages: yaml: 1.10.2 dev: false - /cosmiconfig@8.3.6(typescript@4.9.5): - resolution: {integrity: sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==} - engines: {node: '>=14'} - peerDependencies: - typescript: '>=4.9.5' - peerDependenciesMeta: - typescript: - optional: true - dependencies: - import-fresh: 3.3.0 - js-yaml: 4.1.0 - parse-json: 5.2.0 - path-type: 4.0.0 - typescript: 4.9.5 - dev: true - /cross-spawn@5.1.0: resolution: {integrity: sha512-pTgQJ5KC0d2hcY8eyL1IzlBPYjTkyH72XRZPnLyKus2mBfNjQs3klqbJU2VILqZryAZUt9JOb3h/mWMy23/f5A==} dependencies: @@ -2787,13 +2591,6 @@ packages: csstype: 3.1.3 dev: false - /dot-case@3.0.4: - resolution: {integrity: sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==} - dependencies: - no-case: 3.0.4 - tslib: 2.6.3 - dev: true - /dot-prop@4.2.1: resolution: {integrity: sha512-l0p4+mIuJIua0mhxGoh4a+iNL9bmeK5DvnSVQa6T0OhrVmaEa1XScX5Etc673FePCJOArq/4Pa2cLGODUWTPOQ==} engines: {node: '>=4'} @@ -2827,10 +2624,6 @@ packages: resolution: {integrity: sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==} dev: true - /electron-to-chromium@1.4.810: - resolution: {integrity: sha512-Kaxhu4T7SJGpRQx99tq216gCq2nMxJo+uuT6uzz9l8TVN2stL7M06MIIXAtr9jsrLs2Glflgf2vMQRepxawOdQ==} - dev: true - /emoji-regex@7.0.3: resolution: {integrity: sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==} dev: true @@ -3214,10 +3007,6 @@ packages: engines: {node: '>=4.0'} dev: true - /estree-walker@2.0.2: - resolution: {integrity: sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==} - dev: true - /estree-walker@3.0.3: resolution: {integrity: sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==} dependencies: @@ -3519,11 +3308,6 @@ packages: resolution: {integrity: sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==} dev: true - 
/gensync@1.0.0-beta.2: - resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==} - engines: {node: '>=6.9.0'} - dev: true - /get-caller-file@2.0.5: resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} engines: {node: 6.* || 8.* || >= 10.*} @@ -3629,6 +3413,7 @@ packages: /globals@11.12.0: resolution: {integrity: sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==} engines: {node: '>=4'} + dev: false /globals@13.24.0: resolution: {integrity: sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==} @@ -4253,13 +4038,6 @@ packages: esprima: 4.0.1 dev: true - /js-yaml@4.1.0: - resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==} - hasBin: true - dependencies: - argparse: 2.0.1 - dev: true - /jsbn@0.1.1: resolution: {integrity: sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg==} dev: true @@ -4304,6 +4082,7 @@ packages: resolution: {integrity: sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==} engines: {node: '>=4'} hasBin: true + dev: false /json-buffer@3.0.0: resolution: {integrity: sha512-CuUqjv0FUZIdXkHPI8MezCnFCdaTAacej1TZYulLoAg1h/PhwkdXFN4V/gzY4g+fMBCOV2xF+rp7t2XD2ns/NQ==} @@ -4319,6 +4098,7 @@ packages: /json-parse-even-better-errors@2.3.1: resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==} + dev: false /json-parse-helpfulerror@1.0.3: resolution: {integrity: sha512-XgP0FGR77+QhUxjXkwOMkC94k3WtqEBfcnjWqhRd82qTat4SWKRE+9kUnynz/shm3I4ea2+qISvTIeGTNU7kJg==} @@ -4518,12 +4298,6 @@ packages: steno: 0.4.4 dev: true - /lower-case@2.0.2: - resolution: {integrity: sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==} - dependencies: - tslib: 2.6.3 - dev: true - /lowercase-keys@1.0.1: resolution: {integrity: sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==} engines: {node: '>=0.10.0'} @@ -4546,12 +4320,6 @@ packages: yallist: 2.1.2 dev: true - /lru-cache@5.1.1: - resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==} - dependencies: - yallist: 3.1.1 - dev: true - /lz-string@1.5.0: resolution: {integrity: sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==} hasBin: true @@ -4817,13 +4585,6 @@ packages: resolution: {integrity: sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==} dev: true - /no-case@3.0.4: - resolution: {integrity: sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==} - dependencies: - lower-case: 2.0.2 - tslib: 2.6.3 - dev: true - /node-fetch@2.7.0: resolution: {integrity: sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==} engines: {node: 4.x || >=6.0.0} @@ -4836,10 +4597,6 @@ packages: whatwg-url: 5.0.0 dev: true - /node-releases@2.0.14: - resolution: {integrity: sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==} - dev: true - /normalize-package-data@2.5.0: resolution: {integrity: 
sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==} dependencies: @@ -5076,6 +4833,7 @@ packages: error-ex: 1.3.2 json-parse-even-better-errors: 2.3.1 lines-and-columns: 1.2.4 + dev: false /parse-ms@2.1.0: resolution: {integrity: sha512-kHt7kzLoS9VBZfUsiKjv43mr91ea+U05EyKkEtqp7vNbHxmaVuEqN7XxeEVnGrMtYOAxGrDElSi96K7EgO1zCA==} @@ -5897,11 +5655,6 @@ packages: /react-is@18.3.1: resolution: {integrity: sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==} - /react-refresh@0.14.2: - resolution: {integrity: sha512-jCvmsr+1IUSMUyzOkRcvnVbX3ZYC6g9TDrDbFuFmRDq7PD4yaGbLKNQL6k2jnArV8hjYxh7hVhAZB6s9HDGpZA==} - engines: {node: '>=0.10.0'} - dev: true - /react-router-dom@5.3.4(react@16.14.0): resolution: {integrity: sha512-m4EqFMHv/Ih4kpcBCONHbkT68KoAeHN4p3lAGoNryfHi0dMy0kCzEZakiKRsvg5wHZ/JLrLW8o8KomWiz/qbYQ==} peerDependencies: @@ -6418,13 +6171,6 @@ packages: is-fullwidth-code-point: 3.0.0 dev: true - /snake-case@3.0.4: - resolution: {integrity: sha512-LAOh4z89bGQvl9pFfNF8V146i7o7/CqFPbqzYgP+yYzDIDeS9HaNFtXABamRW+AQzEVODcvE79ljJ+8a9YSdMg==} - dependencies: - dot-case: 3.0.4 - tslib: 2.6.3 - dev: true - /source-map-js@1.2.0: resolution: {integrity: sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==} engines: {node: '>=0.10.0'} @@ -6698,10 +6444,6 @@ packages: resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} engines: {node: '>= 0.4'} - /svg-parser@2.0.4: - resolution: {integrity: sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ==} - dev: true - /symbol-tree@3.2.4: resolution: {integrity: sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==} dev: true @@ -6777,6 +6519,7 @@ packages: /to-fast-properties@2.0.0: resolution: {integrity: sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==} engines: {node: '>=4'} + dev: false /to-readable-stream@1.0.0: resolution: {integrity: sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q==} @@ -6995,17 +6738,6 @@ packages: engines: {node: '>= 0.8'} dev: true - /update-browserslist-db@1.0.16(browserslist@4.23.1): - resolution: {integrity: sha512-KVbTxlBYlckhF5wgfyZXTWnMn7MMZjMu9XG8bPlliUOP9ThaF4QnhP8qrjrH7DRzHfSk0oQv1wToW+iA5GajEQ==} - hasBin: true - peerDependencies: - browserslist: '>= 4.21.0' - dependencies: - browserslist: 4.23.1 - escalade: 3.1.2 - picocolors: 1.0.1 - dev: true - /update-notifier@3.0.1: resolution: {integrity: sha512-grrmrB6Zb8DUiyDIaeRTBCkgISYUgETNe7NglEbVsrLWXeESnlCSP50WfRSj/GmzMPl6Uchj24S/p80nP/ZQrQ==} engines: {node: '>=8'} @@ -7119,21 +6851,6 @@ packages: - terser dev: true - /vite-plugin-svgr@4.2.0(typescript@4.9.5)(vite@4.5.3): - resolution: {integrity: sha512-SC7+FfVtNQk7So0XMjrrtLAbEC8qjFPifyD7+fs/E6aaNdVde6umlVVh0QuwDLdOMu7vp5RiGFsB70nj5yo0XA==} - peerDependencies: - vite: ^2.6.0 || 3 || 4 || 5 - dependencies: - '@rollup/pluginutils': 5.1.0 - '@svgr/core': 8.1.0(typescript@4.9.5) - '@svgr/plugin-jsx': 8.1.0(@svgr/core@8.1.0) - vite: 4.5.3(less@3.13.1) - transitivePeerDependencies: - - rollup - - supports-color - - typescript - dev: true - /vite-tsconfig-paths@3.6.0(vite@4.5.3): resolution: {integrity: sha512-UfsPYonxLqPD633X8cWcPFVuYzx/CMNHAjZTasYwX69sXpa4gNmQkR0XCjj82h7zhLGdTWagMjC1qfb9S+zv0A==} peerDependencies: @@ -7498,10 +7215,6 @@ packages: resolution: 
{integrity: sha512-ncTzHV7NvsQZkYe1DW7cbDLm0YpzHmZF5r/iyP3ZnQtMiJ+pjzisCiMNI+Sj+xQF5pXhSHxSB3uDbsBTzY/c2A==} dev: true - /yallist@3.1.1: - resolution: {integrity: sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==} - dev: true - /yaml@1.10.2: resolution: {integrity: sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==} engines: {node: '>= 6'} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/vite.config.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/vite.config.ts index ddb2832f39bc..1a079c5efa43 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/vite.config.ts +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/vite.config.ts @@ -21,7 +21,7 @@ import { defineConfig, splitVendorChunkPlugin } from 'vite'; import { resolve } from 'path'; -import react from '@vitejs/plugin-react'; +import react from '@vitejs/plugin-react-swc'; function pathResolve(dir: string) { return resolve(__dirname, '.', dir) @@ -29,6 +29,12 @@ function pathResolve(dir: string) { // https://vitejs.dev/config/ export default defineConfig({ + plugins: [ + react({ + devTarget: "es2015" //SWC by default bypasses the build target, set dev target explicitly + }), + splitVendorChunkPlugin() + ], build: { target: "es2015", outDir: 'build', @@ -48,7 +54,6 @@ export default defineConfig({ } } }, - plugins: [react(), splitVendorChunkPlugin()], server: { proxy: { "/api": { From 5659b7e90e7b5b291929fbd15fbde7989eb1a3c8 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Wed, 28 Aug 2024 08:09:02 -0700 Subject: [PATCH 018/106] HDDS-11375. DN startup fails due to illegal configuration of raft.grpc.message.size.max (#7128) --- .../hdds/conf/DatanodeRatisGrpcConfig.java | 17 ----------------- .../src/test/resources/ozone-site.xml | 2 +- 2 files changed, 1 insertion(+), 18 deletions(-) diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisGrpcConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisGrpcConfig.java index 5b283c3a1a33..fde555208b33 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisGrpcConfig.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisGrpcConfig.java @@ -31,23 +31,6 @@ @ConfigGroup(prefix = HDDS_DATANODE_RATIS_PREFIX_KEY + "." + GrpcConfigKeys.PREFIX) public class DatanodeRatisGrpcConfig { - @Config(key = "message.size.max", - defaultValue = "32MB", - type = ConfigType.SIZE, - tags = {OZONE, CLIENT, PERFORMANCE}, - description = "Maximum message size allowed to be received by Grpc " + - "Channel (Server)." 
- ) - private int maximumMessageSize = 32 * 1024 * 1024; - - public int getMaximumMessageSize() { - return maximumMessageSize; - } - - public void setMaximumMessageSize(int maximumMessageSize) { - this.maximumMessageSize = maximumMessageSize; - } - @Config(key = "flow.control.window", defaultValue = "5MB", type = ConfigType.SIZE, diff --git a/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml b/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml index 779ed2b785cb..5ea2eb89dfa3 100644 --- a/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml +++ b/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml @@ -84,7 +84,7 @@ hdds.container.ratis.log.appender.queue.byte-limit - 8MB + 32MB ozone.om.ratis.log.appender.queue.byte-limit From 23f3e5b2e1196f4bd1d9d39ab09ed6fb57323bf1 Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Wed, 28 Aug 2024 16:24:42 -0700 Subject: [PATCH 019/106] HDDS-11152. OMDoubleBuffer error when handling snapshot's background operations (#7112) --- .../ozone/om/snapshot/TestOmSnapshot.java | 12 +- .../snapshot/TestOzoneManagerHASnapshot.java | 104 ++++++++++++++++++ .../hadoop/ozone/om/OmSnapshotManager.java | 40 +++++-- .../hadoop/ozone/om/SnapshotChainManager.java | 17 +-- .../key/OMDirectoriesPurgeRequestWithFSO.java | 5 +- .../om/request/key/OMKeyPurgeRequest.java | 7 +- .../OMSnapshotMoveDeletedKeysRequest.java | 8 +- .../snapshot/OMSnapshotPurgeRequest.java | 6 +- .../OMDirectoriesPurgeResponseWithFSO.java | 5 +- .../om/response/key/OMKeyPurgeResponse.java | 11 +- .../OMSnapshotMoveDeletedKeysResponse.java | 10 +- .../snapshot/OMSnapshotPurgeResponse.java | 7 ++ .../service/AbstractKeyDeletingService.java | 7 +- .../ozone/om/service/KeyDeletingService.java | 6 +- .../om/service/SnapshotDeletingService.java | 3 +- .../SnapshotDirectoryCleaningService.java | 6 +- .../ozone/om/snapshot/SnapshotUtils.java | 6 +- .../key/TestOMKeyPurgeRequestAndResponse.java | 16 +-- .../om/snapshot/TestSnapshotDiffManager.java | 1 - 19 files changed, 183 insertions(+), 94 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java index c123675565aa..9a6bca29b889 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java @@ -204,11 +204,7 @@ private void init() throws Exception { conf.setBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, enabledFileSystemPaths); conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, bucketLayout.name()); conf.setBoolean(OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF, forceFullSnapshotDiff); - conf.setBoolean(OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS, - disableNativeDiff); - conf.setBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, enabledFileSystemPaths); - conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, bucketLayout.name()); - conf.setBoolean(OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF, forceFullSnapshotDiff); + conf.setBoolean(OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS, disableNativeDiff); conf.setEnum(HDDS_DB_PROFILE, DBProfile.TEST); // Enable filesystem snapshot feature for the test regardless of the default conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); @@ -1481,10 +1477,8 @@ public void testSnapDiffCancel() throws Exception { String toSnapshotTableKey = SnapshotInfo.getTableKey(volumeName, bucketName, toSnapName); - 
UUID fromSnapshotID = ozoneManager.getOmSnapshotManager() - .getSnapshotInfo(fromSnapshotTableKey).getSnapshotId(); - UUID toSnapshotID = ozoneManager.getOmSnapshotManager() - .getSnapshotInfo(toSnapshotTableKey).getSnapshotId(); + UUID fromSnapshotID = SnapshotUtils.getSnapshotInfo(ozoneManager, fromSnapshotTableKey).getSnapshotId(); + UUID toSnapshotID = SnapshotUtils.getSnapshotInfo(ozoneManager, toSnapshotTableKey).getSnapshotId(); // Construct SnapshotDiffJob table key. String snapDiffJobKey = fromSnapshotID + DELIMITER + toSnapshotID; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java index 341b5b78c603..f178d00daa78 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java @@ -21,10 +21,12 @@ import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.hdds.utils.db.RDBCheckpointUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; +import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; @@ -34,20 +36,27 @@ import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; +import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Objects; import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.DONE; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.IN_PROGRESS; import static org.apache.ozone.test.LambdaTestUtils.await; @@ -72,6 +81,8 @@ public class TestOzoneManagerHASnapshot { public static void staticInit() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); + conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS); + conf.setTimeDuration(OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS); cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId("om-service-test") @@ -265,4 +276,97 @@ private void createFileKey(OzoneBucket bucket, String keyName) fileKey.write(value); } } + + 
+  /**
+   * This is to simulate HDDS-11152 scenario. In which a follower's doubleBuffer is lagging and accumulates purgeKey
+   * and purgeSnapshot in same batch.
+   */
+  @Test
+  public void testKeyAndSnapshotDeletionService() throws IOException, InterruptedException, TimeoutException {
+    OzoneManager omLeader = cluster.getOMLeader();
+    OzoneManager omFollower;
+
+    if (omLeader != cluster.getOzoneManager(0)) {
+      omFollower = cluster.getOzoneManager(0);
+    } else {
+      omFollower = cluster.getOzoneManager(1);
+    }
+
+    int numKeys = 5;
+    List keys = new ArrayList<>();
+    for (int i = 0; i < numKeys; i++) {
+      String keyName = "key-" + RandomStringUtils.randomNumeric(10);
+      createFileKey(ozoneBucket, keyName);
+      keys.add(keyName);
+    }
+
+    // Stop the key deletion service so that deleted keys get trapped in the snapshots.
+    omLeader.getKeyManager().getDeletingService().suspend();
+    // Stop the snapshot deletion service so that deleted keys get trapped in the snapshots.
+    omLeader.getKeyManager().getSnapshotDeletingService().suspend();
+
+    // Delete half of the keys
+    for (int i = 0; i < numKeys / 2; i++) {
+      ozoneBucket.deleteKey(keys.get(i));
+    }
+
+    String snapshotName = "snap-" + RandomStringUtils.randomNumeric(10);
+    createSnapshot(volumeName, bucketName, snapshotName);
+
+    store.deleteSnapshot(volumeName, bucketName, snapshotName);
+
+    // Pause double buffer on follower node to accumulate all the key purge, snapshot delete and purge transactions.
+    omFollower.getOmRatisServer().getOmStateMachine().getOzoneManagerDoubleBuffer().stopDaemon();
+
+    long keyDeleteServiceCount = omLeader.getKeyManager().getDeletingService().getRunCount().get();
+    omLeader.getKeyManager().getDeletingService().resume();
+
+    GenericTestUtils.waitFor(
+        () -> omLeader.getKeyManager().getDeletingService().getRunCount().get() > keyDeleteServiceCount,
+        1000, 60000);
+
+    long snapshotDeleteServiceCount = omLeader.getKeyManager().getSnapshotDeletingService().getRunCount().get();
+    omLeader.getKeyManager().getSnapshotDeletingService().resume();
+
+    GenericTestUtils.waitFor(
+        () -> omLeader.getKeyManager().getSnapshotDeletingService().getRunCount().get() > snapshotDeleteServiceCount,
+        1000, 60000);
+
+    String tableKey = SnapshotInfo.getTableKey(volumeName, bucketName, snapshotName);
+    checkSnapshotIsPurgedFromDB(omLeader, tableKey);
+
+    // Resume the DoubleBuffer and flush the pending transactions.
+ OzoneManagerDoubleBuffer omDoubleBuffer = + omFollower.getOmRatisServer().getOmStateMachine().getOzoneManagerDoubleBuffer(); + omDoubleBuffer.resume(); + CompletableFuture.supplyAsync(() -> { + omDoubleBuffer.flushTransactions(); + return null; + }); + omDoubleBuffer.awaitFlush(); + checkSnapshotIsPurgedFromDB(omFollower, tableKey); + } + + private void createSnapshot(String volName, String buckName, String snapName) throws IOException { + store.createSnapshot(volName, buckName, snapName); + + String tableKey = SnapshotInfo.getTableKey(volName, buckName, snapName); + SnapshotInfo snapshotInfo = SnapshotUtils.getSnapshotInfo(cluster.getOMLeader(), tableKey); + String fileName = getSnapshotPath(cluster.getOMLeader().getConfiguration(), snapshotInfo); + File snapshotDir = new File(fileName); + if (!RDBCheckpointUtils.waitForCheckpointDirectoryExist(snapshotDir)) { + throw new IOException("Snapshot directory doesn't exist"); + } + } + + private void checkSnapshotIsPurgedFromDB(OzoneManager ozoneManager, String snapshotTableKey) + throws InterruptedException, TimeoutException { + GenericTestUtils.waitFor(() -> { + try { + return ozoneManager.getMetadataManager().getSnapshotInfoTable().get(snapshotTableKey) == null; + } catch (IOException e) { + throw new RuntimeException(e); + } + }, 1000, 60000); + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 0d17851ed1f7..18b29118182d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -98,6 +98,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_REPORT_MAX_PAGE_SIZE; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_REPORT_MAX_PAGE_SIZE_DEFAULT; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_SNAPSHOT_ERROR; import static org.apache.hadoop.ozone.om.snapshot.SnapshotDiffManager.getSnapshotRootPath; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.checkSnapshotActive; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.dropColumnFamilyHandle; @@ -674,19 +675,38 @@ private ReferenceCounted getSnapshot(String snapshotTableKey, boolea } /** - * Returns true if the snapshot is in given status. - * @param key DB snapshot table key - * @param status SnapshotStatus - * @return true if the snapshot is in given status, false otherwise + * Returns the OmSnapshot object and skips the active check. + * This should only be used for API calls initiated by background services, e.g. purgeKeys, purgeSnapshot, + * snapshotMoveDeletedKeys, and SetSnapshotProperty. */ - public boolean isSnapshotStatus(String key, - SnapshotInfo.SnapshotStatus status) - throws IOException { - return getSnapshotInfo(key).getSnapshotStatus().equals(status); + public ReferenceCounted getSnapshot(UUID snapshotId) throws IOException { + return snapshotCache.get(snapshotId); } - public SnapshotInfo getSnapshotInfo(String key) throws IOException { - return SnapshotUtils.getSnapshotInfo(ozoneManager, key); + /** + * Returns snapshotInfo from the cache if it is present there, otherwise it checks RocksDB and returns the value from there.
+ * ################################################# + * NOTE: THIS SHOULD BE USED BY SNAPSHOT CACHE ONLY. + * ################################################# + * Sometimes, the follower OM node may be lagging so much that it gets purgeKeys or snapshotMoveDeletedKeys for a Snapshot, + * and purgeSnapshot for the same Snapshot one after another, and purgeSnapshot's validateAndUpdateCache gets + * executed before the doubleBuffer flushes purgeKeys or snapshotMoveDeletedKeys for that Snapshot. + * This should not be the case on the leader node because SnapshotDeletingService checks that deletedTable and + * deletedDirectoryTable in DB don't have entries for the bucket before it sends a purgeSnapshot on a snapshot. + * If that happens and we only look into the cache, the addToBatch operation will fail when it tries to open + * the DB and purge keys from the Snapshot, because the snapshot is already purged from the SnapshotInfoTable cache. + * Hence, we need to look into the table as well to make sure that the snapshot exists either in the cache or in the DB. + */ + private SnapshotInfo getSnapshotInfo(String snapshotKey) throws IOException { + SnapshotInfo snapshotInfo = ozoneManager.getMetadataManager().getSnapshotInfoTable().get(snapshotKey); + + if (snapshotInfo == null) { + snapshotInfo = ozoneManager.getMetadataManager().getSnapshotInfoTable().getSkipCache(snapshotKey); + } + if (snapshotInfo == null) { + throw new OMException("Snapshot '" + snapshotKey + "' is not found.", INVALID_SNAPSHOT_ERROR); + } + return snapshotInfo; } public static String getSnapshotPrefix(String snapshotName) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java index 60353590e75c..b069a174cd0f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java @@ -362,13 +362,16 @@ public synchronized void updateSnapshot(SnapshotInfo snapshotInfo) { public synchronized boolean deleteSnapshot(SnapshotInfo snapshotInfo) throws IOException { validateSnapshotChain(); - boolean status = deleteSnapshotGlobal(snapshotInfo.getSnapshotId()) && - deleteSnapshotPath(snapshotInfo.getSnapshotPath(), - snapshotInfo.getSnapshotId()); - if (status) { - snapshotIdToTableKey.remove(snapshotInfo.getSnapshotId()); - } - return status; + return deleteSnapshotGlobal(snapshotInfo.getSnapshotId()) && + deleteSnapshotPath(snapshotInfo.getSnapshotPath(), snapshotInfo.getSnapshotId()); + } + + /** + * Remove the snapshot from the snapshotIdToTableKey map.
+ */ + public synchronized void removeFromSnapshotIdToTable(UUID snapshotId) throws IOException { + validateSnapshotChain(); + snapshotIdToTableKey.remove(snapshotId); } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java index 2c182a6a5f5d..cb10c0d2e40a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; @@ -74,9 +75,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn OMMetrics omMetrics = ozoneManager.getMetrics(); try { if (fromSnapshot != null) { - fromSnapshotInfo = ozoneManager.getMetadataManager() - .getSnapshotInfoTable() - .get(fromSnapshot); + fromSnapshotInfo = SnapshotUtils.getSnapshotInfo(ozoneManager, fromSnapshot); } for (OzoneManagerProtocolProtos.PurgePathRequest path : purgeRequests) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java index 9ed921839683..5738d7945bfe 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.util.ArrayList; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -74,14 +75,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn try { SnapshotInfo fromSnapshotInfo = null; if (fromSnapshot != null) { - fromSnapshotInfo = ozoneManager.getMetadataManager() - .getSnapshotInfoTable().get(fromSnapshot); + fromSnapshotInfo = SnapshotUtils.getSnapshotInfo(ozoneManager, fromSnapshot); } omClientResponse = new OMKeyPurgeResponse(omResponse.build(), keysToBePurgedList, fromSnapshotInfo, keysToUpdateList); } catch (IOException ex) { - omClientResponse = new OMKeyPurgeResponse( - createErrorOMResponse(omResponse, ex)); + omClientResponse = new OMKeyPurgeResponse(createErrorOMResponse(omResponse, ex)); } return omClientResponse; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java index 122108ad65fc..df4781bb0ca6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java 
@@ -22,7 +22,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -60,7 +59,6 @@ public OMSnapshotMoveDeletedKeysRequest(OMRequest omRequest) { @Override @DisallowedUntilLayoutVersion(FILESYSTEM_SNAPSHOT) public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - OmSnapshotManager omSnapshotManager = ozoneManager.getOmSnapshotManager(); OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); SnapshotChainManager snapshotChainManager = @@ -78,8 +76,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn OzoneManagerProtocolProtos.OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest()); try { - nextSnapshot = SnapshotUtils.getNextActiveSnapshot(fromSnapshot, - snapshotChainManager, omSnapshotManager); + // Check the snapshot exists. + SnapshotUtils.getSnapshotInfo(ozoneManager, fromSnapshot.getTableKey()); + + nextSnapshot = SnapshotUtils.getNextActiveSnapshot(fromSnapshot, snapshotChainManager, ozoneManager); // Get next non-deleted snapshot. List nextDBKeysList = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java index 9b46aeef4c0f..47304b416aeb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java @@ -25,7 +25,6 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -72,7 +71,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn final long trxnLogIndex = termIndex.getIndex(); - OmSnapshotManager omSnapshotManager = ozoneManager.getOmSnapshotManager(); OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); SnapshotChainManager snapshotChainManager = @@ -106,14 +104,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn } SnapshotInfo nextSnapshot = - SnapshotUtils.getNextActiveSnapshot(fromSnapshot, snapshotChainManager, omSnapshotManager); + SnapshotUtils.getNextActiveSnapshot(fromSnapshot, snapshotChainManager, ozoneManager); // Step 1: Update the deep clean flag for the next active snapshot updateSnapshotInfoAndCache(nextSnapshot, omMetadataManager, trxnLogIndex); // Step 2: Update the snapshot chain. updateSnapshotChainAndCache(omMetadataManager, fromSnapshot, trxnLogIndex); - // Remove and close snapshot's RocksDB instance from SnapshotCache. - omSnapshotManager.invalidateCacheEntry(fromSnapshot.getSnapshotId()); // Step 3: Purge the snapshot from SnapshotInfoTable cache. 
omMetadataManager.getSnapshotInfoTable() .addCacheEntry(new CacheKey<>(fromSnapshot.getTableKey()), CacheValue.get(trxnLogIndex)); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java index edb13f8cf984..138e942e2b60 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java @@ -86,10 +86,7 @@ public void addToDBBatch(OMMetadataManager metadataManager, .getOzoneManager().getOmSnapshotManager(); try (ReferenceCounted - rcFromSnapshotInfo = omSnapshotManager.getSnapshot( - fromSnapshotInfo.getVolumeName(), - fromSnapshotInfo.getBucketName(), - fromSnapshotInfo.getName())) { + rcFromSnapshotInfo = omSnapshotManager.getSnapshot(fromSnapshotInfo.getSnapshotId())) { OmSnapshot fromSnapshot = rcFromSnapshotInfo.get(); DBStore fromSnapshotStore = fromSnapshot.getMetadataManager() .getStore(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java index b16ba95d78f6..b59c7d18d408 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java @@ -75,18 +75,13 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, if (fromSnapshot != null) { OmSnapshotManager omSnapshotManager = - ((OmMetadataManagerImpl) omMetadataManager) - .getOzoneManager().getOmSnapshotManager(); + ((OmMetadataManagerImpl) omMetadataManager).getOzoneManager().getOmSnapshotManager(); try (ReferenceCounted rcOmFromSnapshot = - omSnapshotManager.getSnapshot( - fromSnapshot.getVolumeName(), - fromSnapshot.getBucketName(), - fromSnapshot.getName())) { + omSnapshotManager.getSnapshot(fromSnapshot.getSnapshotId())) { OmSnapshot fromOmSnapshot = rcOmFromSnapshot.get(); - DBStore fromSnapshotStore = - fromOmSnapshot.getMetadataManager().getStore(); + DBStore fromSnapshotStore = fromOmSnapshot.getMetadataManager().getStore(); // Init Batch Operation for snapshot db. 
try (BatchOperation writeBatch = fromSnapshotStore.initBatchOperation()) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java index 3726faacfd70..3b5a7454f9db 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java @@ -91,19 +91,13 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, .getOzoneManager().getOmSnapshotManager(); try (ReferenceCounted rcOmFromSnapshot = - omSnapshotManager.getSnapshot( - fromSnapshot.getVolumeName(), - fromSnapshot.getBucketName(), - fromSnapshot.getName())) { + omSnapshotManager.getSnapshot(fromSnapshot.getSnapshotId())) { OmSnapshot fromOmSnapshot = rcOmFromSnapshot.get(); if (nextSnapshot != null) { try (ReferenceCounted - rcOmNextSnapshot = omSnapshotManager.getSnapshot( - nextSnapshot.getVolumeName(), - nextSnapshot.getBucketName(), - nextSnapshot.getName())) { + rcOmNextSnapshot = omSnapshotManager.getSnapshot(nextSnapshot.getSnapshotId())) { OmSnapshot nextOmSnapshot = rcOmNextSnapshot.get(); RDBStore nextSnapshotStore = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java index 139ce468e53d..81a020653f76 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java @@ -90,8 +90,15 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, continue; } + // Remove and close snapshot's RocksDB instance from SnapshotCache. + ((OmMetadataManagerImpl) omMetadataManager).getOzoneManager().getOmSnapshotManager() + .invalidateCacheEntry(snapshotInfo.getSnapshotId()); + // Remove the snapshot from snapshotId to snapshotTableKey map. + ((OmMetadataManagerImpl) omMetadataManager).getSnapshotChainManager() + .removeFromSnapshotIdToTable(snapshotInfo.getSnapshotId()); // Delete Snapshot checkpoint directory. deleteCheckpointDirectory(omMetadataManager, snapshotInfo); + // Delete snapshotInfo from the table. 
omMetadataManager.getSnapshotInfoTable().deleteWithBatch(batchOperation, dbKey); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java index 429e286287c1..154bd474b6de 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java @@ -31,7 +31,6 @@ import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; @@ -39,6 +38,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeletedKeys; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -576,8 +576,7 @@ protected boolean isBufferLimitCrossed( return cLimit + increment >= maxLimit; } - protected SnapshotInfo getPreviousActiveSnapshot(SnapshotInfo snapInfo, - SnapshotChainManager chainManager, OmSnapshotManager omSnapshotManager) + protected SnapshotInfo getPreviousActiveSnapshot(SnapshotInfo snapInfo, SnapshotChainManager chainManager) throws IOException { SnapshotInfo currSnapInfo = snapInfo; while (chainManager.hasPreviousPathSnapshot( @@ -586,7 +585,7 @@ protected SnapshotInfo getPreviousActiveSnapshot(SnapshotInfo snapInfo, UUID prevPathSnapshot = chainManager.previousPathSnapshot( currSnapInfo.getSnapshotPath(), currSnapInfo.getSnapshotId()); String tableKey = chainManager.getTableKey(prevPathSnapshot); - SnapshotInfo prevSnapInfo = omSnapshotManager.getSnapshotInfo(tableKey); + SnapshotInfo prevSnapInfo = SnapshotUtils.getSnapshotInfo(ozoneManager, tableKey); if (prevSnapInfo.getSnapshotStatus() == SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE) { return prevSnapInfo; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java index c42854828720..aa2eb6720a3c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java @@ -279,13 +279,11 @@ private void processSnapshotDeepClean(int delCount) } String snapshotBucketKey = dbBucketKey + OzoneConsts.OM_KEY_PREFIX; - SnapshotInfo previousSnapshot = getPreviousActiveSnapshot( - currSnapInfo, snapChainManager, omSnapshotManager); + SnapshotInfo previousSnapshot = getPreviousActiveSnapshot(currSnapInfo, snapChainManager); SnapshotInfo previousToPrevSnapshot = null; if (previousSnapshot != null) { - previousToPrevSnapshot = getPreviousActiveSnapshot( - previousSnapshot, snapChainManager, omSnapshotManager); + previousToPrevSnapshot = getPreviousActiveSnapshot(previousSnapshot, 
snapChainManager); } Table previousKeyTable = null; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java index 99e3903447d4..a98081c63a17 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java @@ -207,8 +207,7 @@ public BackgroundTaskResult call() throws InterruptedException { } //TODO: [SNAPSHOT] Add lock to deletedTable and Active DB. - SnapshotInfo previousSnapshot = getPreviousActiveSnapshot( - snapInfo, chainManager, omSnapshotManager); + SnapshotInfo previousSnapshot = getPreviousActiveSnapshot(snapInfo, chainManager); Table previousKeyTable = null; Table previousDirTable = null; OmSnapshot omPreviousSnapshot = null; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java index fe0f6e111ed3..9746b4421b77 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java @@ -176,8 +176,7 @@ public BackgroundTaskResult call() { "unexpected state."); } - SnapshotInfo previousSnapshot = getPreviousActiveSnapshot( - currSnapInfo, snapChainManager, omSnapshotManager); + SnapshotInfo previousSnapshot = getPreviousActiveSnapshot(currSnapInfo, snapChainManager); SnapshotInfo previousToPrevSnapshot = null; Table previousKeyTable = null; @@ -194,8 +193,7 @@ public BackgroundTaskResult call() { .getKeyTable(bucketInfo.getBucketLayout()); prevRenamedTable = omPreviousSnapshot .getMetadataManager().getSnapshotRenamedTable(); - previousToPrevSnapshot = getPreviousActiveSnapshot( - previousSnapshot, snapChainManager, omSnapshotManager); + previousToPrevSnapshot = getPreviousActiveSnapshot(previousSnapshot, snapChainManager); } Table previousToPrevKeyTable = null; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java index 2041fa791a76..e0f40dabd8a7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -143,7 +142,7 @@ public static void checkSnapshotActive(SnapshotInfo snapInfo, * Get the next non deleted snapshot in the snapshot chain. 
*/ public static SnapshotInfo getNextActiveSnapshot(SnapshotInfo snapInfo, - SnapshotChainManager chainManager, OmSnapshotManager omSnapshotManager) + SnapshotChainManager chainManager, OzoneManager ozoneManager) throws IOException { // If the snapshot is deleted in the previous run, then the in-memory @@ -162,8 +161,7 @@ public static SnapshotInfo getNextActiveSnapshot(SnapshotInfo snapInfo, snapInfo.getSnapshotPath(), snapInfo.getSnapshotId()); String tableKey = chainManager.getTableKey(nextPathSnapshot); - SnapshotInfo nextSnapshotInfo = - omSnapshotManager.getSnapshotInfo(tableKey); + SnapshotInfo nextSnapshotInfo = getSnapshotInfo(ozoneManager, tableKey); if (nextSnapshotInfo.getSnapshotStatus().equals( SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE)) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java index a912f549b3ce..2cd0de920be4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java @@ -210,17 +210,8 @@ public void testKeyPurgeInSnapshot() throws Exception { assertFalse(omMetadataManager.getDeletedTable().isExist(deletedKey)); } - SnapshotInfo fromSnapshotInfo = new SnapshotInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setName("snap1") - .build(); - - ReferenceCounted rcOmSnapshot = - ozoneManager.getOmSnapshotManager().getSnapshot( - fromSnapshotInfo.getVolumeName(), - fromSnapshotInfo.getBucketName(), - fromSnapshotInfo.getName()); + ReferenceCounted rcOmSnapshot = ozoneManager.getOmSnapshotManager() + .getSnapshot(snapInfo.getVolumeName(), snapInfo.getBucketName(), snapInfo.getName()); OmSnapshot omSnapshot = rcOmSnapshot.get(); // The keys should be present in the snapshot's deletedTable @@ -248,8 +239,7 @@ public void testKeyPurgeInSnapshot() throws Exception { try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { - OMKeyPurgeResponse omKeyPurgeResponse = new OMKeyPurgeResponse( - omResponse, deletedKeyNames, fromSnapshotInfo, null); + OMKeyPurgeResponse omKeyPurgeResponse = new OMKeyPurgeResponse(omResponse, deletedKeyNames, snapInfo, null); omKeyPurgeResponse.addToDBBatch(omMetadataManager, batchOperation); // Do manual commit and see whether addToBatch is successful or not. 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index d07372c4fc68..0f2ab6150669 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -364,7 +364,6 @@ public void init() throws RocksDBException, IOException, ExecutionException { omSnapshotManager = mock(OmSnapshotManager.class); when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); - when(omSnapshotManager.isSnapshotStatus(any(), any())).thenReturn(true); SnapshotCache snapshotCache = new SnapshotCache(mockCacheLoader(), 10, omMetrics, 0); when(omSnapshotManager.getActiveSnapshot(anyString(), anyString(), anyString())) From 3e9cdb609b5c9a1856891b6467859c7fc9fdcc77 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 29 Aug 2024 08:31:48 +0200 Subject: [PATCH 020/106] HDDS-11378. Allow disabling OM version-specific feature via config (#7129) --- .../main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java | 3 +++ .../dist/src/main/compose/compatibility/docker-config | 1 + .../dist/src/main/smoketest/compatibility/om.robot | 5 +++++ .../apache/hadoop/ozone/TestOzoneConfigurationFields.java | 1 + .../main/java/org/apache/hadoop/ozone/om/OzoneManager.java | 7 +++++++ .../hadoop/ozone/om/request/key/OMKeyCommitRequest.java | 5 +++++ .../hadoop/ozone/om/request/key/OMKeyCreateRequest.java | 5 +++++ 7 files changed, 27 insertions(+) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index 0f3b55235be9..46becc9e64b5 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -73,6 +73,9 @@ private OMConfigKeys() { public static final String OZONE_OM_DECOMMISSIONED_NODES_KEY = "ozone.om.decommissioned.nodes"; + public static final String OZONE_OM_FEATURES_DISABLED = + "ozone.om.features.disabled"; + public static final String OZONE_OM_ADDRESS_KEY = "ozone.om.address"; public static final String OZONE_OM_BIND_HOST_DEFAULT = diff --git a/hadoop-ozone/dist/src/main/compose/compatibility/docker-config b/hadoop-ozone/dist/src/main/compose/compatibility/docker-config index a5727d2b1e4a..d3984110d8d3 100644 --- a/hadoop-ozone/dist/src/main/compose/compatibility/docker-config +++ b/hadoop-ozone/dist/src/main/compose/compatibility/docker-config @@ -31,6 +31,7 @@ OZONE-SITE.XML_hdds.datanode.volume.min.free.space=100MB OZONE-SITE.XML_ozone.recon.address=recon:9891 OZONE-SITE.XML_hdds.scmclient.max.retry.timeout=30s OZONE-SITE.XML_ozone.http.basedir=/tmp/ozone_http +OZONE-SITE.XML_ozone.om.features.disabled=ATOMIC_REWRITE_KEY HADOOP_OPTS="-Dhadoop.opts=test" HDFS_STORAGECONTAINERMANAGER_OPTS="-Dhdfs.scm.opts=test" diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/om.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/om.robot index dc862d59c1a6..c3caec2ae917 100644 --- a/hadoop-ozone/dist/src/main/smoketest/compatibility/om.robot +++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/om.robot @@ -25,3 +25,8 @@ Picks up command line options ${processes} = List All Processes 
Should Contain ${processes} %{HDFS_OM_OPTS} Should Contain ${processes} %{HADOOP_OPTS} + +Rejects Atomic Key Rewrite + Execute ozone freon ockg -n1 -t1 -p rewrite + ${output} = Execute and check rc ozone sh key rewrite -t EC -r rs-3-2-1024k /vol1/bucket1/rewrite/0 255 + Should Contain ${output} Feature disabled: ATOMIC_REWRITE_KEY diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java index cc1f93fbc1e0..8a219514d34e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java @@ -93,6 +93,7 @@ private void addPropertiesNotInXml() { OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, OMConfigKeys.OZONE_FS_TRASH_CHECKPOINT_INTERVAL_KEY, OMConfigKeys.OZONE_OM_S3_GPRC_SERVER_ENABLED, + OMConfigKeys.OZONE_OM_FEATURES_DISABLED, OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS_NATIVE, OzoneConfigKeys.OZONE_CLIENT_REQUIRED_OM_VERSION_MIN_KEY, OzoneConfigKeys.OZONE_RECOVERING_CONTAINER_SCRUBBING_SERVICE_WORKERS, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 2fb15ec3d609..602158bb5c33 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -5008,4 +5008,11 @@ public void awaitDoubleBufferFlush() throws InterruptedException { getOmServerProtocol().awaitDoubleBufferFlush(); } } + + public void checkFeatureEnabled(OzoneManagerVersion feature) throws OMException { + String disabledFeatures = configuration.get(OMConfigKeys.OZONE_OM_FEATURES_DISABLED, ""); + if (disabledFeatures.contains(feature.name())) { + throw new OMException("Feature disabled: " + feature, OMException.ResultCodes.NOT_SUPPORTED_OPERATION); + } + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index b8bf89a35427..8a21fdfa3773 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -28,6 +28,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.ozone.OzoneManagerVersion; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -96,6 +97,10 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { KeyArgs keyArgs = commitKeyRequest.getKeyArgs(); + if (keyArgs.hasExpectedDataGeneration()) { + ozoneManager.checkFeatureEnabled(OzoneManagerVersion.ATOMIC_REWRITE_KEY); + } + // Verify key name final boolean checkKeyNameEnabled = ozoneManager.getConfiguration() .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java index d0ed0eacecdd..dee5bb0fe0e6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java @@ -28,6 +28,7 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.ozone.OzoneManagerVersion; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.om.OMConfigKeys; @@ -93,6 +94,10 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { KeyArgs keyArgs = createKeyRequest.getKeyArgs(); + if (keyArgs.hasExpectedDataGeneration()) { + ozoneManager.checkFeatureEnabled(OzoneManagerVersion.ATOMIC_REWRITE_KEY); + } + // Verify key name OmUtils.verifyKeyNameWithSnapshotReservedWord(keyArgs.getKeyName()); final boolean checkKeyNameEnabled = ozoneManager.getConfiguration() From 23211c19324bb67e67874ac71797ce63b8e47eb9 Mon Sep 17 00:00:00 2001 From: VarshaRavi <30603028+VarshaRaviCV@users.noreply.github.com> Date: Thu, 29 Aug 2024 16:29:03 +0530 Subject: [PATCH 021/106] HDDS-11381. Adding logging for sortByDistanceCost in NetworkTopologyImpl (#7133) --- .../org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java index 1f3d0f02e6de..31e83f82d694 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java @@ -789,6 +789,9 @@ public List sortByDistanceCost(Node reader, List shuffledNodes = new ArrayList<>(nodes.subList(0, activeLen)); shuffleOperation.accept(shuffledNodes); + if (LOG.isDebugEnabled()) { + LOG.debug("Sorted datanodes {}, result: {}", nodes, shuffledNodes); + } return shuffledNodes; } // Sort weights for the nodes array @@ -815,6 +818,9 @@ public List sortByDistanceCost(Node reader, Preconditions.checkState(ret.size() == activeLen, "Wrong number of nodes sorted!"); + if (LOG.isDebugEnabled()) { + LOG.debug("Sorted datanodes {} for client {}, result: {}", nodes, reader, ret); + } return ret; } From f22c6f8dfcc3e2ac822189e207d4cc85fc6fc490 Mon Sep 17 00:00:00 2001 From: Abhishek Pal <43001336+devabhishekpal@users.noreply.github.com> Date: Thu, 29 Aug 2024 18:10:23 +0530 Subject: [PATCH 022/106] HDDS-11164. 
Improve Navbar UI (#7088) --- .../recon/ozone-recon-web/package.json | 2 +- .../recon/ozone-recon-web/pnpm-lock.yaml | 695 +++++++++--------- .../webapps/recon/ozone-recon-web/src/app.tsx | 7 +- .../src/components/navBar/navBar.less | 2 +- .../src/utils/axiosRequestHelper.tsx | 4 +- .../src/v2/components/navBar/navBar.less | 65 ++ .../src/v2/components/navBar/navBar.tsx | 180 +++++ .../src/v2/pages/overview/overview.tsx | 22 +- .../src/v2/pages/volumes/volumes.tsx | 14 +- 9 files changed, 604 insertions(+), 387 deletions(-) create mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.less create mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.tsx diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json index ab2b9e3fbe9c..141cdadcbe83 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json @@ -60,7 +60,7 @@ "@testing-library/react": "^12.1.5", "@types/react": "16.8.15", "@types/react-dom": "16.8.4", - "@types/react-router-dom": "^4.3.5", + "@types/react-router-dom": "^5.3.3", "@types/react-select": "^3.0.13", "@typescript-eslint/eslint-plugin": "^5.30.0", "@typescript-eslint/parser": "^5.30.0", diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml index a56b0d07c22d..ebbc4e2219da 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml @@ -28,7 +28,7 @@ dependencies: version: 2.5.1 echarts: specifier: ^5.5.0 - version: 5.5.0 + version: 5.5.1 filesize: specifier: ^6.4.0 version: 6.4.0 @@ -63,7 +63,7 @@ dependencies: devDependencies: '@testing-library/jest-dom': specifier: ^6.4.8 - version: 6.4.8 + version: 6.5.0 '@testing-library/react': specifier: ^12.1.5 version: 12.1.5(react-dom@16.14.0)(react@16.14.0) @@ -74,8 +74,8 @@ devDependencies: specifier: 16.8.4 version: 16.8.4 '@types/react-router-dom': - specifier: ^4.3.5 - version: 4.3.5 + specifier: ^5.3.3 + version: 5.3.3 '@types/react-select': specifier: ^3.0.13 version: 3.1.2 @@ -99,7 +99,7 @@ devDependencies: version: 3.4.1(eslint-config-prettier@8.10.0)(eslint@7.32.0)(prettier@2.8.8) jsdom: specifier: ^24.1.1 - version: 24.1.1 + version: 24.1.3 json-server: specifier: ^0.15.1 version: 0.15.1 @@ -120,7 +120,7 @@ devDependencies: version: 3.6.0(vite@4.5.3) vitest: specifier: ^1.6.0 - version: 1.6.0(jsdom@24.1.1)(less@3.13.1) + version: 1.6.0(jsdom@24.1.3)(less@3.13.1) packages: @@ -153,7 +153,7 @@ packages: dependencies: '@ant-design/colors': 6.0.0 '@ant-design/icons-svg': 4.4.2 - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 lodash: 4.17.21 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -166,7 +166,7 @@ packages: peerDependencies: react: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 json2mq: 0.2.0 lodash: 4.17.21 @@ -187,57 +187,28 @@ packages: '@babel/highlight': 7.24.7 picocolors: 1.0.1 - /@babel/generator@7.24.7: - resolution: {integrity: sha512-oipXieGC3i45Y1A41t4tAqpnEZWgB/lC6Ehh6+rOviR5XWpTtMmLN+fGjz9vOiNRt0p6RtO6DtD0pdU3vpqdSA==} + 
/@babel/generator@7.25.5: + resolution: {integrity: sha512-abd43wyLfbWoxC6ahM8xTkqLpGB2iWBVyuKC9/srhFunCd1SDNrV1s72bBpK4hLj8KLzHBBcOblvLQZBNw9r3w==} engines: {node: '>=6.9.0'} dependencies: - '@babel/types': 7.24.7 + '@babel/types': 7.25.4 '@jridgewell/gen-mapping': 0.3.5 '@jridgewell/trace-mapping': 0.3.25 jsesc: 2.5.2 dev: false - /@babel/helper-environment-visitor@7.24.7: - resolution: {integrity: sha512-DoiN84+4Gnd0ncbBOM9AZENV4a5ZiL39HYMyZJGZ/AZEykHYdJw0wW3kdcsh9/Kn+BRXHLkkklZ51ecPKmI1CQ==} - engines: {node: '>=6.9.0'} - dependencies: - '@babel/types': 7.24.7 - dev: false - - /@babel/helper-function-name@7.24.7: - resolution: {integrity: sha512-FyoJTsj/PEUWu1/TYRiXTIHc8lbw+TDYkZuoE43opPS5TrI7MyONBE1oNvfguEXAD9yhQRrVBnXdXzSLQl9XnA==} - engines: {node: '>=6.9.0'} - dependencies: - '@babel/template': 7.24.7 - '@babel/types': 7.24.7 - dev: false - - /@babel/helper-hoist-variables@7.24.7: - resolution: {integrity: sha512-MJJwhkoGy5c4ehfoRyrJ/owKeMl19U54h27YYftT0o2teQ3FJ3nQUf/I3LlJsX4l3qlw7WRXUmiyajvHXoTubQ==} - engines: {node: '>=6.9.0'} - dependencies: - '@babel/types': 7.24.7 - dev: false - /@babel/helper-module-imports@7.24.7: resolution: {integrity: sha512-8AyH3C+74cgCVVXow/myrynrAGv+nTVg5vKu2nZph9x7RcRwzmh0VFallJuFTZ9mx6u4eSdXZfcOzSqTUm0HCA==} engines: {node: '>=6.9.0'} dependencies: - '@babel/traverse': 7.24.7 - '@babel/types': 7.24.7 + '@babel/traverse': 7.25.4 + '@babel/types': 7.25.4 transitivePeerDependencies: - supports-color dev: false - /@babel/helper-split-export-declaration@7.24.7: - resolution: {integrity: sha512-oy5V7pD+UvfkEATUKvIjvIAH/xCzfsFVw7ygW2SI6NClZzquT+mwdTfgfdbUiceh6iQO0CHtCPsyze/MZ2YbAA==} - engines: {node: '>=6.9.0'} - dependencies: - '@babel/types': 7.24.7 - dev: false - - /@babel/helper-string-parser@7.24.7: - resolution: {integrity: sha512-7MbVt6xrwFQbunH2DNQsAP5sTGxfqQtErvBIvIMi6EQnbgUOuVYanvREcmFrOPhoXBrTtjhhP+lW+o5UfK+tDg==} + /@babel/helper-string-parser@7.24.8: + resolution: {integrity: sha512-pO9KhhRcuUyGnJWwyEgnRJTSIZHiT+vMD0kPeD+so0l7mxkMT19g3pjY9GTnHySck/hDzq+dtW/4VgnMkippsQ==} engines: {node: '>=6.9.0'} dev: false @@ -254,52 +225,49 @@ packages: js-tokens: 4.0.0 picocolors: 1.0.1 - /@babel/parser@7.24.7: - resolution: {integrity: sha512-9uUYRm6OqQrCqQdG1iCBwBPZgN8ciDBro2nIOFaiRz1/BCxaI7CNvQbDHvsArAC7Tw9Hda/B3U+6ui9u4HWXPw==} + /@babel/parser@7.25.4: + resolution: {integrity: sha512-nq+eWrOgdtu3jG5Os4TQP3x3cLA8hR8TvJNjD8vnPa20WGycimcparWnLK4jJhElTK6SDyuJo1weMKO/5LpmLA==} engines: {node: '>=6.0.0'} hasBin: true dependencies: - '@babel/types': 7.24.7 + '@babel/types': 7.25.4 dev: false - /@babel/runtime@7.24.7: - resolution: {integrity: sha512-UwgBRMjJP+xv857DCngvqXI3Iq6J4v0wXmwc6sapg+zyhbwmQX67LUEFrkK5tbyJ30jGuG3ZvWpBiB9LCy1kWw==} + /@babel/runtime@7.25.4: + resolution: {integrity: sha512-DSgLeL/FNcpXuzav5wfYvHCGvynXkJbn3Zvc3823AEe9nPwW9IK4UoCSS5yGymmQzN0pCPvivtgS6/8U2kkm1w==} engines: {node: '>=6.9.0'} dependencies: regenerator-runtime: 0.14.1 - /@babel/template@7.24.7: - resolution: {integrity: sha512-jYqfPrU9JTF0PmPy1tLYHW4Mp4KlgxJD9l2nP9fD6yT/ICi554DmrWBAEYpIelzjHf1msDP3PxJIRt/nFNfBig==} + /@babel/template@7.25.0: + resolution: {integrity: sha512-aOOgh1/5XzKvg1jvVz7AVrx2piJ2XBi227DHmbY6y+bM9H2FlN+IfecYu4Xl0cNiiVejlsCri89LUsbj8vJD9Q==} engines: {node: '>=6.9.0'} dependencies: '@babel/code-frame': 7.24.7 - '@babel/parser': 7.24.7 - '@babel/types': 7.24.7 + '@babel/parser': 7.25.4 + '@babel/types': 7.25.4 dev: false - /@babel/traverse@7.24.7: - resolution: {integrity: 
sha512-yb65Ed5S/QAcewNPh0nZczy9JdYXkkAbIsEo+P7BE7yO3txAY30Y/oPa3QkQ5It3xVG2kpKMg9MsdxZaO31uKA==} + /@babel/traverse@7.25.4: + resolution: {integrity: sha512-VJ4XsrD+nOvlXyLzmLzUs/0qjFS4sK30te5yEFlvbbUNEgKaVb2BHZUpAL+ttLPQAHNrsI3zZisbfha5Cvr8vg==} engines: {node: '>=6.9.0'} dependencies: '@babel/code-frame': 7.24.7 - '@babel/generator': 7.24.7 - '@babel/helper-environment-visitor': 7.24.7 - '@babel/helper-function-name': 7.24.7 - '@babel/helper-hoist-variables': 7.24.7 - '@babel/helper-split-export-declaration': 7.24.7 - '@babel/parser': 7.24.7 - '@babel/types': 7.24.7 - debug: 4.3.5 + '@babel/generator': 7.25.5 + '@babel/parser': 7.25.4 + '@babel/template': 7.25.0 + '@babel/types': 7.25.4 + debug: 4.3.6 globals: 11.12.0 transitivePeerDependencies: - supports-color dev: false - /@babel/types@7.24.7: - resolution: {integrity: sha512-XEFXSlxiG5td2EJRe8vOmRbaXVgfcBlszKujvVmWIK/UpywWljQCfzAv3RQCGujWQ1RD4YYWEAqDXfuJiy8f5Q==} + /@babel/types@7.25.4: + resolution: {integrity: sha512-zQ1ijeeCXVEh+aNL0RlmkPkG8HUiDcU2pzQQFjtbntgAczRASFzj4H+6+bV+dy1ntKR14I/DypeuRG1uma98iQ==} engines: {node: '>=6.9.0'} dependencies: - '@babel/helper-string-parser': 7.24.7 + '@babel/helper-string-parser': 7.24.8 '@babel/helper-validator-identifier': 7.24.7 to-fast-properties: 2.0.0 dev: false @@ -327,7 +295,7 @@ packages: peerDependencies: react: '>=16.3.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 '@emotion/cache': 10.0.29 '@emotion/css': 10.0.27 '@emotion/serialize': 0.11.16 @@ -801,8 +769,8 @@ packages: eslint-visitor-keys: 3.4.3 dev: true - /@eslint-community/regexpp@4.10.1: - resolution: {integrity: sha512-Zm2NGpWELsQAD1xsJzGQpYfvICSsFkEpU0jxBjfdC6uNEWXcHnfs9hScFWtXVDVl+rBQJGrl4g1vcKIejpH9dA==} + /@eslint-community/regexpp@4.11.0: + resolution: {integrity: sha512-G/M/tIiMrTAxEWRfLfQJMmGNX28IxBg4PBz8XqQhqUHLFI6TL2htpIB1iQCj144V5ee/JaKyT9/WZ0MGZWfA7A==} engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} dev: true @@ -811,7 +779,7 @@ packages: engines: {node: ^10.12.0 || >=12.0.0} dependencies: ajv: 6.12.6 - debug: 4.3.5 + debug: 4.3.6 espree: 7.3.1 globals: 13.24.0 ignore: 4.0.6 @@ -833,7 +801,7 @@ packages: deprecated: Use @eslint/config-array instead dependencies: '@humanwhocodes/object-schema': 1.2.1 - debug: 4.3.5 + debug: 4.3.6 minimatch: 3.1.2 transitivePeerDependencies: - supports-color @@ -868,7 +836,7 @@ packages: engines: {node: '>=6.0.0'} dependencies: '@jridgewell/set-array': 1.2.1 - '@jridgewell/sourcemap-codec': 1.4.15 + '@jridgewell/sourcemap-codec': 1.5.0 '@jridgewell/trace-mapping': 0.3.25 /@jridgewell/resolve-uri@3.1.2: @@ -879,21 +847,21 @@ packages: resolution: {integrity: sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==} engines: {node: '>=6.0.0'} - /@jridgewell/sourcemap-codec@1.4.15: - resolution: {integrity: sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==} + /@jridgewell/sourcemap-codec@1.5.0: + resolution: {integrity: sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==} /@jridgewell/trace-mapping@0.3.25: resolution: {integrity: sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==} dependencies: '@jridgewell/resolve-uri': 3.1.2 - '@jridgewell/sourcemap-codec': 1.4.15 + '@jridgewell/sourcemap-codec': 1.5.0 /@mswjs/cookies@0.2.2: resolution: {integrity: sha512-mlN83YSrcFgk7Dm1Mys40DLssI1KdJji2CMKN8eOlBqsTADYzj2+jWzsANsUTFbxDMWPD5e9bfA1RGqBpS3O1g==} engines: {node: '>=14'} dependencies: 
'@types/set-cookie-parser': 2.4.10 - set-cookie-parser: 2.6.0 + set-cookie-parser: 2.7.0 dev: true /@mswjs/interceptors@0.17.10: @@ -903,7 +871,7 @@ packages: '@open-draft/until': 1.0.3 '@types/debug': 4.1.12 '@xmldom/xmldom': 0.8.10 - debug: 4.3.5 + debug: 4.3.6 headers-polyfill: 3.2.5 outvariant: 1.4.3 strict-event-emitter: 0.2.8 @@ -944,128 +912,128 @@ packages: dev: true optional: true - /@rollup/rollup-android-arm-eabi@4.18.0: - resolution: {integrity: sha512-Tya6xypR10giZV1XzxmH5wr25VcZSncG0pZIjfePT0OVBvqNEurzValetGNarVrGiq66EBVAFn15iYX4w6FKgQ==} + /@rollup/rollup-android-arm-eabi@4.21.1: + resolution: {integrity: sha512-2thheikVEuU7ZxFXubPDOtspKn1x0yqaYQwvALVtEcvFhMifPADBrgRPyHV0TF3b+9BgvgjgagVyvA/UqPZHmg==} cpu: [arm] os: [android] requiresBuild: true dev: true optional: true - /@rollup/rollup-android-arm64@4.18.0: - resolution: {integrity: sha512-avCea0RAP03lTsDhEyfy+hpfr85KfyTctMADqHVhLAF3MlIkq83CP8UfAHUssgXTYd+6er6PaAhx/QGv4L1EiA==} + /@rollup/rollup-android-arm64@4.21.1: + resolution: {integrity: sha512-t1lLYn4V9WgnIFHXy1d2Di/7gyzBWS8G5pQSXdZqfrdCGTwi1VasRMSS81DTYb+avDs/Zz4A6dzERki5oRYz1g==} cpu: [arm64] os: [android] requiresBuild: true dev: true optional: true - /@rollup/rollup-darwin-arm64@4.18.0: - resolution: {integrity: sha512-IWfdwU7KDSm07Ty0PuA/W2JYoZ4iTj3TUQjkVsO/6U+4I1jN5lcR71ZEvRh52sDOERdnNhhHU57UITXz5jC1/w==} + /@rollup/rollup-darwin-arm64@4.21.1: + resolution: {integrity: sha512-AH/wNWSEEHvs6t4iJ3RANxW5ZCK3fUnmf0gyMxWCesY1AlUj8jY7GC+rQE4wd3gwmZ9XDOpL0kcFnCjtN7FXlA==} cpu: [arm64] os: [darwin] requiresBuild: true dev: true optional: true - /@rollup/rollup-darwin-x64@4.18.0: - resolution: {integrity: sha512-n2LMsUz7Ynu7DoQrSQkBf8iNrjOGyPLrdSg802vk6XT3FtsgX6JbE8IHRvposskFm9SNxzkLYGSq9QdpLYpRNA==} + /@rollup/rollup-darwin-x64@4.21.1: + resolution: {integrity: sha512-dO0BIz/+5ZdkLZrVgQrDdW7m2RkrLwYTh2YMFG9IpBtlC1x1NPNSXkfczhZieOlOLEqgXOFH3wYHB7PmBtf+Bg==} cpu: [x64] os: [darwin] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-arm-gnueabihf@4.18.0: - resolution: {integrity: sha512-C/zbRYRXFjWvz9Z4haRxcTdnkPt1BtCkz+7RtBSuNmKzMzp3ZxdM28Mpccn6pt28/UWUCTXa+b0Mx1k3g6NOMA==} + /@rollup/rollup-linux-arm-gnueabihf@4.21.1: + resolution: {integrity: sha512-sWWgdQ1fq+XKrlda8PsMCfut8caFwZBmhYeoehJ05FdI0YZXk6ZyUjWLrIgbR/VgiGycrFKMMgp7eJ69HOF2pQ==} cpu: [arm] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-arm-musleabihf@4.18.0: - resolution: {integrity: sha512-l3m9ewPgjQSXrUMHg93vt0hYCGnrMOcUpTz6FLtbwljo2HluS4zTXFy2571YQbisTnfTKPZ01u/ukJdQTLGh9A==} + /@rollup/rollup-linux-arm-musleabihf@4.21.1: + resolution: {integrity: sha512-9OIiSuj5EsYQlmwhmFRA0LRO0dRRjdCVZA3hnmZe1rEwRk11Jy3ECGGq3a7RrVEZ0/pCsYWx8jG3IvcrJ6RCew==} cpu: [arm] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-arm64-gnu@4.18.0: - resolution: {integrity: sha512-rJ5D47d8WD7J+7STKdCUAgmQk49xuFrRi9pZkWoRD1UeSMakbcepWXPF8ycChBoAqs1pb2wzvbY6Q33WmN2ftw==} + /@rollup/rollup-linux-arm64-gnu@4.21.1: + resolution: {integrity: sha512-0kuAkRK4MeIUbzQYu63NrJmfoUVicajoRAL1bpwdYIYRcs57iyIV9NLcuyDyDXE2GiZCL4uhKSYAnyWpjZkWow==} cpu: [arm64] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-arm64-musl@4.18.0: - resolution: {integrity: sha512-be6Yx37b24ZwxQ+wOQXXLZqpq4jTckJhtGlWGZs68TgdKXJgw54lUUoFYrg6Zs/kjzAQwEwYbp8JxZVzZLRepQ==} + /@rollup/rollup-linux-arm64-musl@4.21.1: + resolution: {integrity: sha512-/6dYC9fZtfEY0vozpc5bx1RP4VrtEOhNQGb0HwvYNwXD1BBbwQ5cKIbUVVU7G2d5WRE90NfB922elN8ASXAJEA==} cpu: [arm64] os: [linux] 
requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-powerpc64le-gnu@4.18.0: - resolution: {integrity: sha512-hNVMQK+qrA9Todu9+wqrXOHxFiD5YmdEi3paj6vP02Kx1hjd2LLYR2eaN7DsEshg09+9uzWi2W18MJDlG0cxJA==} + /@rollup/rollup-linux-powerpc64le-gnu@4.21.1: + resolution: {integrity: sha512-ltUWy+sHeAh3YZ91NUsV4Xg3uBXAlscQe8ZOXRCVAKLsivGuJsrkawYPUEyCV3DYa9urgJugMLn8Z3Z/6CeyRQ==} cpu: [ppc64] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-riscv64-gnu@4.18.0: - resolution: {integrity: sha512-ROCM7i+m1NfdrsmvwSzoxp9HFtmKGHEqu5NNDiZWQtXLA8S5HBCkVvKAxJ8U+CVctHwV2Gb5VUaK7UAkzhDjlg==} + /@rollup/rollup-linux-riscv64-gnu@4.21.1: + resolution: {integrity: sha512-BggMndzI7Tlv4/abrgLwa/dxNEMn2gC61DCLrTzw8LkpSKel4o+O+gtjbnkevZ18SKkeN3ihRGPuBxjaetWzWg==} cpu: [riscv64] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-s390x-gnu@4.18.0: - resolution: {integrity: sha512-0UyyRHyDN42QL+NbqevXIIUnKA47A+45WyasO+y2bGJ1mhQrfrtXUpTxCOrfxCR4esV3/RLYyucGVPiUsO8xjg==} + /@rollup/rollup-linux-s390x-gnu@4.21.1: + resolution: {integrity: sha512-z/9rtlGd/OMv+gb1mNSjElasMf9yXusAxnRDrBaYB+eS1shFm6/4/xDH1SAISO5729fFKUkJ88TkGPRUh8WSAA==} cpu: [s390x] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-x64-gnu@4.18.0: - resolution: {integrity: sha512-xuglR2rBVHA5UsI8h8UbX4VJ470PtGCf5Vpswh7p2ukaqBGFTnsfzxUBetoWBWymHMxbIG0Cmx7Y9qDZzr648w==} + /@rollup/rollup-linux-x64-gnu@4.21.1: + resolution: {integrity: sha512-kXQVcWqDcDKw0S2E0TmhlTLlUgAmMVqPrJZR+KpH/1ZaZhLSl23GZpQVmawBQGVhyP5WXIsIQ/zqbDBBYmxm5w==} cpu: [x64] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-x64-musl@4.18.0: - resolution: {integrity: sha512-LKaqQL9osY/ir2geuLVvRRs+utWUNilzdE90TpyoX0eNqPzWjRm14oMEE+YLve4k/NAqCdPkGYDaDF5Sw+xBfg==} + /@rollup/rollup-linux-x64-musl@4.21.1: + resolution: {integrity: sha512-CbFv/WMQsSdl+bpX6rVbzR4kAjSSBuDgCqb1l4J68UYsQNalz5wOqLGYj4ZI0thGpyX5kc+LLZ9CL+kpqDovZA==} cpu: [x64] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-win32-arm64-msvc@4.18.0: - resolution: {integrity: sha512-7J6TkZQFGo9qBKH0pk2cEVSRhJbL6MtfWxth7Y5YmZs57Pi+4x6c2dStAUvaQkHQLnEQv1jzBUW43GvZW8OFqA==} + /@rollup/rollup-win32-arm64-msvc@4.21.1: + resolution: {integrity: sha512-3Q3brDgA86gHXWHklrwdREKIrIbxC0ZgU8lwpj0eEKGBQH+31uPqr0P2v11pn0tSIxHvcdOWxa4j+YvLNx1i6g==} cpu: [arm64] os: [win32] requiresBuild: true dev: true optional: true - /@rollup/rollup-win32-ia32-msvc@4.18.0: - resolution: {integrity: sha512-Txjh+IxBPbkUB9+SXZMpv+b/vnTEtFyfWZgJ6iyCmt2tdx0OF5WhFowLmnh8ENGNpfUlUZkdI//4IEmhwPieNg==} + /@rollup/rollup-win32-ia32-msvc@4.21.1: + resolution: {integrity: sha512-tNg+jJcKR3Uwe4L0/wY3Ro0H+u3nrb04+tcq1GSYzBEmKLeOQF2emk1whxlzNqb6MMrQ2JOcQEpuuiPLyRcSIw==} cpu: [ia32] os: [win32] requiresBuild: true dev: true optional: true - /@rollup/rollup-win32-x64-msvc@4.18.0: - resolution: {integrity: sha512-UOo5FdvOL0+eIVTgS4tIdbW+TtnBLWg1YBCcU2KWM7nuNwRz9bksDX1bekJJCpu25N1DVWaCwnT39dVQxzqS8g==} + /@rollup/rollup-win32-x64-msvc@4.21.1: + resolution: {integrity: sha512-xGiIH95H1zU7naUyTKEyOA/I0aexNMUdO9qRv0bLKN3qu25bBdrxZHqA3PTJ24YNN/GdMzG4xkDcd/GvjuhfLg==} cpu: [x64] os: [win32] requiresBuild: true @@ -1081,8 +1049,8 @@ packages: engines: {node: '>=6'} dev: true - /@swc/core-darwin-arm64@1.7.18: - resolution: {integrity: sha512-MwLc5U+VGPMZm8MjlFBjEB2wyT1EK0NNJ3tn+ps9fmxdFP+PL8EpMiY1O1F2t1ydy2OzBtZz81sycjM9RieFBg==} + /@swc/core-darwin-arm64@1.7.21: + resolution: {integrity: 
sha512-hh5uOZ7jWF66z2TRMhhXtWMQkssuPCSIZPy9VHf5KvZ46cX+5UeECDthchYklEVZQyy4Qr6oxfh4qff/5spoMA==} engines: {node: '>=10'} cpu: [arm64] os: [darwin] @@ -1090,8 +1058,8 @@ packages: dev: true optional: true - /@swc/core-darwin-x64@1.7.18: - resolution: {integrity: sha512-IkukOQUw7/14VkHp446OkYGCZEHqZg9pTmTdBawlUyz2JwZMSn2VodCl7aFSdGCsU4Cwni8zKA8CCgkCCAELhw==} + /@swc/core-darwin-x64@1.7.21: + resolution: {integrity: sha512-lTsPquqSierQ6jWiWM7NnYXXZGk9zx3NGkPLHjPbcH5BmyiauX0CC/YJYJx7YmS2InRLyALlGmidHkaF4JY28A==} engines: {node: '>=10'} cpu: [x64] os: [darwin] @@ -1099,8 +1067,8 @@ packages: dev: true optional: true - /@swc/core-linux-arm-gnueabihf@1.7.18: - resolution: {integrity: sha512-ATnb6jJaBeXCqrTUawWdoOy7eP9SCI7UMcfXlYIMxX4otKKspLPAEuGA5RaNxlCcj9ObyO0J3YGbtZ6hhD2pjg==} + /@swc/core-linux-arm-gnueabihf@1.7.21: + resolution: {integrity: sha512-AgSd0fnSzAqCvWpzzZCq75z62JVGUkkXEOpfdi99jj/tryPy38KdXJtkVWJmufPXlRHokGTBitalk33WDJwsbA==} engines: {node: '>=10'} cpu: [arm] os: [linux] @@ -1108,8 +1076,8 @@ packages: dev: true optional: true - /@swc/core-linux-arm64-gnu@1.7.18: - resolution: {integrity: sha512-poHtH7zL7lEp9K2inY90lGHJABWxURAOgWNeZqrcR5+jwIe7q5KBisysH09Zf/JNF9+6iNns+U0xgWTNJzBuGA==} + /@swc/core-linux-arm64-gnu@1.7.21: + resolution: {integrity: sha512-l+jw6RQ4Y43/8dIst0c73uQE+W3kCWrCFqMqC/xIuE/iqHOnvYK6YbA1ffOct2dImkHzNiKuoehGqtQAc6cNaQ==} engines: {node: '>=10'} cpu: [arm64] os: [linux] @@ -1117,8 +1085,8 @@ packages: dev: true optional: true - /@swc/core-linux-arm64-musl@1.7.18: - resolution: {integrity: sha512-qnNI1WmcOV7Wz1ZDyK6WrOlzLvJ01rnni8ec950mMHWkLRMP53QvCvhF3S+7gFplWBwWJTOOPPUqJp/PlSxWyQ==} + /@swc/core-linux-arm64-musl@1.7.21: + resolution: {integrity: sha512-29KKZXrTo/c9F1JFL9WsNvCa6UCdIVhHP5EfuYhlKbn5/YmSsNFkuHdUtZFEd5U4+jiShXDmgGCtLW2d08LIwg==} engines: {node: '>=10'} cpu: [arm64] os: [linux] @@ -1126,8 +1094,8 @@ packages: dev: true optional: true - /@swc/core-linux-x64-gnu@1.7.18: - resolution: {integrity: sha512-x9SCqCLzwtlqtD5At3I1a7Gco+EuXnzrJGoucmkpeQohshHuwa+cskqsXO6u1Dz0jXJEuHbBZB9va1wYYfjgFg==} + /@swc/core-linux-x64-gnu@1.7.21: + resolution: {integrity: sha512-HsP3JwddvQj5HvnjmOr+Bd5plEm6ccpfP5wUlm3hywzvdVkj+yR29bmD7UwpV/1zCQ60Ry35a7mXhKI6HQxFgw==} engines: {node: '>=10'} cpu: [x64] os: [linux] @@ -1135,8 +1103,8 @@ packages: dev: true optional: true - /@swc/core-linux-x64-musl@1.7.18: - resolution: {integrity: sha512-qtj8iOpMMgKjzxTv+islmEY0JBsbd93nka0gzcTTmGZxKtL5jSUsYQvkxwNPZr5M9NU1fgaR3n1vE6lFmtY0IQ==} + /@swc/core-linux-x64-musl@1.7.21: + resolution: {integrity: sha512-hYKLVeUTHqvFK628DFJEwxoX6p42T3HaQ4QjNtf3oKhiJWFh9iTRUrN/oCB5YI3R9WMkFkKh+99gZ/Dd0T5lsg==} engines: {node: '>=10'} cpu: [x64] os: [linux] @@ -1144,8 +1112,8 @@ packages: dev: true optional: true - /@swc/core-win32-arm64-msvc@1.7.18: - resolution: {integrity: sha512-ltX/Ol9+Qu4SXmISCeuwVgAjSa8nzHTymknpozzVMgjXUoZMoz6lcynfKL1nCh5XLgqh0XNHUKLti5YFF8LrrA==} + /@swc/core-win32-arm64-msvc@1.7.21: + resolution: {integrity: sha512-qyWAKW10aMBe6iUqeZ7NAJIswjfggVTUpDINpQGUJhz+pR71YZDidXgZXpaDB84YyDB2JAlRqd1YrLkl7CMiIw==} engines: {node: '>=10'} cpu: [arm64] os: [win32] @@ -1153,8 +1121,8 @@ packages: dev: true optional: true - /@swc/core-win32-ia32-msvc@1.7.18: - resolution: {integrity: sha512-RgTcFP3wgyxnQbTCJrlgBJmgpeTXo8t807GU9GxApAXfpLZJ3swJ2GgFUmIJVdLWyffSHF5BEkF3FmF6mtH5AQ==} + /@swc/core-win32-ia32-msvc@1.7.21: + resolution: {integrity: sha512-cy61wS3wgH5mEwBiQ5w6/FnQrchBDAdPsSh0dKSzNmI+4K8hDxS8uzdBycWqJXO0cc+mA77SIlwZC3hP3Kum2g==} engines: {node: '>=10'} cpu: [ia32] os: [win32] @@ -1162,8 
+1130,8 @@ packages: dev: true optional: true - /@swc/core-win32-x64-msvc@1.7.18: - resolution: {integrity: sha512-XbZ0wAgzR757+DhQcnv60Y/bK9yuWPhDNRQVFFQVRsowvK3+c6EblyfUSytIidpXgyYFzlprq/9A9ZlO/wvDWw==} + /@swc/core-win32-x64-msvc@1.7.21: + resolution: {integrity: sha512-/rexGItJURNJOdae+a48M+loT74nsEU+PyRRVAkZMKNRtLoYFAr0cpDlS5FodIgGunp/nqM0bst4H2w6Y05IKA==} engines: {node: '>=10'} cpu: [x64] os: [win32] @@ -1171,8 +1139,8 @@ packages: dev: true optional: true - /@swc/core@1.7.18: - resolution: {integrity: sha512-qL9v5N5S38ijmqiQRvCFUUx2vmxWT/JJ2rswElnyaHkOHuVoAFhBB90Ywj4RKjh3R0zOjhEcemENTyF3q3G6WQ==} + /@swc/core@1.7.21: + resolution: {integrity: sha512-7/cN0SZ+y2V6e0hsDD8koGR0QVh7Jl3r756bwaHLLSN+kReoUb/yVcLsA8iTn90JLME3DkQK4CPjxDCQiyMXNg==} engines: {node: '>=10'} requiresBuild: true peerDependencies: @@ -1184,16 +1152,16 @@ packages: '@swc/counter': 0.1.3 '@swc/types': 0.1.12 optionalDependencies: - '@swc/core-darwin-arm64': 1.7.18 - '@swc/core-darwin-x64': 1.7.18 - '@swc/core-linux-arm-gnueabihf': 1.7.18 - '@swc/core-linux-arm64-gnu': 1.7.18 - '@swc/core-linux-arm64-musl': 1.7.18 - '@swc/core-linux-x64-gnu': 1.7.18 - '@swc/core-linux-x64-musl': 1.7.18 - '@swc/core-win32-arm64-msvc': 1.7.18 - '@swc/core-win32-ia32-msvc': 1.7.18 - '@swc/core-win32-x64-msvc': 1.7.18 + '@swc/core-darwin-arm64': 1.7.21 + '@swc/core-darwin-x64': 1.7.21 + '@swc/core-linux-arm-gnueabihf': 1.7.21 + '@swc/core-linux-arm64-gnu': 1.7.21 + '@swc/core-linux-arm64-musl': 1.7.21 + '@swc/core-linux-x64-gnu': 1.7.21 + '@swc/core-linux-x64-musl': 1.7.21 + '@swc/core-win32-arm64-msvc': 1.7.21 + '@swc/core-win32-ia32-msvc': 1.7.21 + '@swc/core-win32-x64-msvc': 1.7.21 dev: true /@swc/counter@0.1.3: @@ -1218,7 +1186,7 @@ packages: engines: {node: '>=12'} dependencies: '@babel/code-frame': 7.24.7 - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 '@types/aria-query': 5.0.4 aria-query: 5.1.3 chalk: 4.1.2 @@ -1227,12 +1195,11 @@ packages: pretty-format: 27.5.1 dev: true - /@testing-library/jest-dom@6.4.8: - resolution: {integrity: sha512-JD0G+Zc38f5MBHA4NgxQMR5XtO5Jx9g86jqturNTt2WUfRmLDIY7iKkWHDCCTiDuFMre6nxAD5wHw9W5kI4rGw==} + /@testing-library/jest-dom@6.5.0: + resolution: {integrity: sha512-xGGHpBXYSHUUr6XsKBfs85TWlYKpTc37cSBBVrXcib2MkHLboWlkClhWF37JKlDb9KEq3dHs+f2xR7XJEWGBxA==} engines: {node: '>=14', npm: '>=6', yarn: '>=1'} dependencies: '@adobe/css-tools': 4.4.0 - '@babel/runtime': 7.24.7 aria-query: 5.3.0 chalk: 3.0.0 css.escape: 1.5.1 @@ -1248,7 +1215,7 @@ packages: react: <18.0.0 react-dom: <18.0.0 dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 '@testing-library/dom': 8.20.1 '@types/react-dom': 16.8.4 react: 16.14.0 @@ -1277,13 +1244,6 @@ packages: resolution: {integrity: sha512-qjDJRrmvBMiTx+jyLxvLfJU7UznFuokDv4f3WRuriHKERccVpFU+8XMQUAbDzoiJCsmexxRExQeMwwCdamSKDA==} dev: true - /@types/history@5.0.0: - resolution: {integrity: sha512-hy8b7Y1J8OGe6LbAjj3xniQrj3v6lsivCcrmf4TzSgPzLkhIeKgc5IZnT7ReIqmEuodjfO8EYAuoFvIrHi/+jQ==} - deprecated: This is a stub types definition. history provides its own type definitions, so you do not need this installed. 
- dependencies: - history: 5.3.0 - dev: true - /@types/js-levenshtein@1.1.3: resolution: {integrity: sha512-jd+Q+sD20Qfu9e2aEXogiO3vpOC1PYJOUdyN9gvs4Qrvkg4wF43L5OhqrPeokdv8TL0/mXoYfpkcoGZMNN2pkQ==} dev: true @@ -1295,17 +1255,17 @@ packages: /@types/keyv@3.1.4: resolution: {integrity: sha512-BQ5aZNSCpj7D6K2ksrRCTmKRLEpnPvWDiLPfoGyhZ++8YtiK9d/3DBKPJgry359X/P1PfruyYwvnvwFjuEiEIg==} dependencies: - '@types/node': 20.14.8 + '@types/node': 22.5.1 dev: true /@types/ms@0.7.34: resolution: {integrity: sha512-nG96G3Wp6acyAgJqGasjODb+acrI7KltPiRxzHPXnP3NgI28bpQDRv53olbqGXbfcgF5aiiHmO3xpwEpS5Ld9g==} dev: true - /@types/node@20.14.8: - resolution: {integrity: sha512-DO+2/jZinXfROG7j7WKFn/3C6nFwxy2lLpgLjEXJz+0XKphZlTLJ14mo8Vfg8X5BWN6XjyESXq+LcYdT7tR3bA==} + /@types/node@22.5.1: + resolution: {integrity: sha512-KkHsxej0j9IW1KKOOAA/XBA0z08UFSrRQHErzEfA3Vgq57eXIMYboIlHJuYIfd+lwCQjtKqUu3UnmKbtUc9yRw==} dependencies: - undici-types: 5.26.5 + undici-types: 6.19.8 dev: true /@types/parse-json@4.0.2: @@ -1322,10 +1282,10 @@ packages: '@types/react': 16.8.15 dev: true - /@types/react-router-dom@4.3.5: - resolution: {integrity: sha512-eFajSUASYbPHg2BDM1G8Btx+YqGgvROPIg6sBhl3O4kbDdYXdFdfrgQFf/pcBuQVObjfT9AL/dd15jilR5DIEA==} + /@types/react-router-dom@5.3.3: + resolution: {integrity: sha512-kpqnYK4wcdm5UaWI3fLcELopqLrHgLqNsdpHauzlQktfkHL3npOSwtj1Uz9oKBAzs7lFtVkV8j83voAz2D8fhw==} dependencies: - '@types/history': 5.0.0 + '@types/history': 4.7.11 '@types/react': 16.8.15 '@types/react-router': 5.1.20 dev: true @@ -1342,11 +1302,11 @@ packages: dependencies: '@types/react': 16.8.15 '@types/react-dom': 16.8.4 - '@types/react-transition-group': 4.4.10 + '@types/react-transition-group': 4.4.11 dev: true - /@types/react-transition-group@4.4.10: - resolution: {integrity: sha512-hT/+s0VQs2ojCX823m60m5f0sL5idt9SO6Tj6Dg+rdphGPIeJbJ6CxvBYkgkGKrYeDjvIpKTR38UzmtHJOGW3Q==} + /@types/react-transition-group@4.4.11: + resolution: {integrity: sha512-RM05tAniPZ5DZPzzNFP+DmrcOdD0efDUxMy3145oljWSl3x9ZV5vhme98gTxFrj2lhXvmGNnUiuDyJgY9IKkNA==} dependencies: '@types/react': 16.8.15 dev: true @@ -1361,7 +1321,7 @@ packages: /@types/responselike@1.0.3: resolution: {integrity: sha512-H/+L+UkTV33uf49PH5pCAUBVPNj2nDBXTN+qS1dOwyyg24l3CcicicCA7ca+HMvJBZcFgl5r8e+RR6elsb4Lyw==} dependencies: - '@types/node': 20.14.8 + '@types/node': 22.5.1 dev: true /@types/semver@7.5.8: @@ -1371,7 +1331,7 @@ packages: /@types/set-cookie-parser@2.4.10: resolution: {integrity: sha512-GGmQVGpQWUe5qglJozEjZV/5dyxbOOZ0LHe/lqyWssB88Y4svNfst0uqBVscdDeIKl5Jy5+aPSvy7mI9tYRguw==} dependencies: - '@types/node': 20.14.8 + '@types/node': 22.5.1 dev: true /@typescript-eslint/eslint-plugin@5.62.0(@typescript-eslint/parser@5.62.0)(eslint@7.32.0)(typescript@4.9.5): @@ -1385,17 +1345,17 @@ packages: typescript: optional: true dependencies: - '@eslint-community/regexpp': 4.10.1 + '@eslint-community/regexpp': 4.11.0 '@typescript-eslint/parser': 5.62.0(eslint@7.32.0)(typescript@4.9.5) '@typescript-eslint/scope-manager': 5.62.0 '@typescript-eslint/type-utils': 5.62.0(eslint@7.32.0)(typescript@4.9.5) '@typescript-eslint/utils': 5.62.0(eslint@7.32.0)(typescript@4.9.5) - debug: 4.3.5 + debug: 4.3.6 eslint: 7.32.0 graphemer: 1.4.0 - ignore: 5.3.1 + ignore: 5.3.2 natural-compare-lite: 1.4.0 - semver: 7.6.2 + semver: 7.6.3 tsutils: 3.21.0(typescript@4.9.5) typescript: 4.9.5 transitivePeerDependencies: @@ -1415,7 +1375,7 @@ packages: '@typescript-eslint/scope-manager': 5.62.0 '@typescript-eslint/types': 5.62.0 '@typescript-eslint/typescript-estree': 5.62.0(typescript@4.9.5) - debug: 4.3.5 + 
debug: 4.3.6 eslint: 7.32.0 typescript: 4.9.5 transitivePeerDependencies: @@ -1442,7 +1402,7 @@ packages: dependencies: '@typescript-eslint/typescript-estree': 5.62.0(typescript@4.9.5) '@typescript-eslint/utils': 5.62.0(eslint@7.32.0)(typescript@4.9.5) - debug: 4.3.5 + debug: 4.3.6 eslint: 7.32.0 tsutils: 3.21.0(typescript@4.9.5) typescript: 4.9.5 @@ -1466,10 +1426,10 @@ packages: dependencies: '@typescript-eslint/types': 5.62.0 '@typescript-eslint/visitor-keys': 5.62.0 - debug: 4.3.5 + debug: 4.3.6 globby: 11.1.0 is-glob: 4.0.3 - semver: 7.6.2 + semver: 7.6.3 tsutils: 3.21.0(typescript@4.9.5) typescript: 4.9.5 transitivePeerDependencies: @@ -1490,7 +1450,7 @@ packages: '@typescript-eslint/typescript-estree': 5.62.0(typescript@4.9.5) eslint: 7.32.0 eslint-scope: 5.1.1 - semver: 7.6.2 + semver: 7.6.3 transitivePeerDependencies: - supports-color - typescript @@ -1509,7 +1469,7 @@ packages: peerDependencies: vite: ^4 || ^5 dependencies: - '@swc/core': 1.7.18 + '@swc/core': 1.7.21 vite: 4.5.3(less@3.13.1) transitivePeerDependencies: - '@swc/helpers' @@ -1520,7 +1480,7 @@ packages: dependencies: '@vitest/spy': 1.6.0 '@vitest/utils': 1.6.0 - chai: 4.4.1 + chai: 4.5.0 dev: true /@vitest/runner@1.6.0: @@ -1534,7 +1494,7 @@ packages: /@vitest/snapshot@1.6.0: resolution: {integrity: sha512-+Hx43f8Chus+DCmygqqfetcAZrDJwvTj0ymqjQq4CvmpKFSTVteEOBzCusu1x2tt4OJcvBflyHUE0DZSLgEMtQ==} dependencies: - magic-string: 0.30.10 + magic-string: 0.30.11 pathe: 1.1.2 pretty-format: 29.7.0 dev: true @@ -1585,7 +1545,7 @@ packages: resolution: {integrity: sha512-MxXdReSRhGO7VlFe1bRG/oI7/mdLV9B9JJT0N8vZOhF7gFRR5l3M8W9G8JxmKV+JC5mGqJ0QvqfSOLsCPa4nUw==} engines: {node: '>=0.4.0'} dependencies: - acorn: 8.12.0 + acorn: 8.12.1 dev: true /acorn@7.4.1: @@ -1594,8 +1554,8 @@ packages: hasBin: true dev: true - /acorn@8.12.0: - resolution: {integrity: sha512-RTvkC4w+KNXrM39/lWCUaG0IbRkWdCv7W/IOW9oU6SawyxulvkQy5HQPVTKxEjczcUvapcrw3cFx/60VN/NRNw==} + /acorn@8.12.1: + resolution: {integrity: sha512-tcpGyI9zbizT9JbV6oYE477V6mTlXvvi0T0G3SNIYE2apm/G5huBa1+K89VGeovbg+jycCrfhl3ADxErOuO6Jg==} engines: {node: '>=0.4.0'} hasBin: true dev: true @@ -1621,7 +1581,7 @@ packages: resolution: {integrity: sha512-H0TSyFNDMomMNJQBn8wFV5YC/2eJ+VXECwOadZJT554xP6cODZHPX3H9QMQECxvrgiSOP1pHjy1sMWQVYJOUOA==} engines: {node: '>= 14'} dependencies: - debug: 4.3.5 + debug: 4.3.6 transitivePeerDependencies: - supports-color dev: true @@ -1635,13 +1595,13 @@ packages: uri-js: 4.4.1 dev: true - /ajv@8.16.0: - resolution: {integrity: sha512-F0twR8U1ZU67JIEtekUcLkXkoO5mMMmgGD8sK/xUFzJ805jxHQl92hImFAqqXMyMYjSPOyUPAwHYhB72g5sTXw==} + /ajv@8.17.1: + resolution: {integrity: sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==} dependencies: fast-deep-equal: 3.1.3 + fast-uri: 3.0.1 json-schema-traverse: 1.0.0 require-from-string: 2.0.2 - uri-js: 4.4.1 dev: true /ansi-align@3.0.1: @@ -1714,7 +1674,7 @@ packages: '@ant-design/colors': 5.1.1 '@ant-design/icons': 4.8.3(react-dom@16.14.0)(react@16.14.0) '@ant-design/react-slick': 0.28.4(react@16.14.0) - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 array-tree-filter: 2.1.0 classnames: 2.5.1 copy-to-clipboard: 3.3.3 @@ -1859,8 +1819,8 @@ packages: resolution: {integrity: sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA==} dev: true - /aws4@1.13.0: - resolution: {integrity: sha512-3AungXC4I8kKsS9PuS4JH2nc+0bVY/mjgrephHTIi8fpEeGsTHBUJeosp0Wc1myYMElmD0B3Oc4XL/HVJ4PV2g==} + /aws4@1.13.2: + resolution: {integrity: 
sha512-lHe62zvbTB5eEABUVi/AwVh0ZKY9rMMDhmm+eeyuuUQbQ3+J+fONVQOZyj+DdrvD4BY33uYniyRJ4UJIaSKAfw==} dev: true /axios@0.28.1: @@ -1893,7 +1853,7 @@ packages: /babel-plugin-macros@2.8.0: resolution: {integrity: sha512-SEP5kJpfGYqYKpBrj5XU3ahw5p5GOHJ0U5ssOSQ/WBVdwkD2Dzlce95exQTs3jOVWPPKLBN2rlEWkCK7dSmLvg==} dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 cosmiconfig: 6.0.0 resolve: 1.22.8 dev: false @@ -2049,8 +2009,8 @@ packages: resolution: {integrity: sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw==} dev: true - /chai@4.4.1: - resolution: {integrity: sha512-13sOfMv2+DWduEU+/xbun3LScLoqN17nBeTLUsmDfKdoiC1fr0n9PU4guu4AhRcOVFk/sW8LyZWHuhWtQZiF+g==} + /chai@4.5.0: + resolution: {integrity: sha512-RITGBfijLkBddZvnn8jdqoTypxvqbOLYQkGGxXzeFjVHvudaPw0HNFD9x928/eUwYWd2dPCugVqspGALTZZQKw==} engines: {node: '>=4'} dependencies: assertion-error: 1.1.0 @@ -2059,7 +2019,7 @@ packages: get-func-name: 2.0.2 loupe: 2.3.7 pathval: 1.1.1 - type-detect: 4.0.8 + type-detect: 4.1.0 dev: true /chalk@2.4.2: @@ -2203,7 +2163,7 @@ packages: resolution: {integrity: sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==} engines: {node: '>= 0.6'} dependencies: - mime-db: 1.52.0 + mime-db: 1.53.0 dev: true /compression@1.7.4: @@ -2410,11 +2370,11 @@ packages: resolution: {integrity: sha512-fnULvOpxnC5/Vg3NCiWelDsLiUc9bRwAPs/+LfTLNvetFCtCTN+yQz15C/fs4AwX1R9K5GLtLfn8QW+dWisaAw==} engines: {node: '>=0.11'} dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 dev: false - /dayjs@1.11.11: - resolution: {integrity: sha512-okzr3f11N6WuqYtZSvm+F776mB41wRZMhKP+hc34YdW+KmtYYK9iqvHSwo2k9FEH3fhGXvOPV6yz2IcSrfRUDg==} + /dayjs@1.11.13: + resolution: {integrity: sha512-oaMBel6gjolK862uaPQOVTA7q3TZhuSvuMQAAglQDOWYO9A91IrAOUJEyKVlqJlHE0vq5p5UXxzdPfMH/x6xNg==} dev: false /debug@2.6.9: @@ -2439,8 +2399,8 @@ packages: ms: 2.0.0 dev: true - /debug@4.3.5: - resolution: {integrity: sha512-pt0bNEmneDIvdL1Xsd9oDQ/wrQRkXDT4AUWlNZNPKvW5x/jyO9VFXkJUP07vQ2upmw5PlaITaPKc31jK13V+jg==} + /debug@4.3.6: + resolution: {integrity: sha512-O/09Bd4Z1fBrU4VzkhFqVgpPzaGbw6Sm9FEkBT1A/YBXQFGuuSxa1dN2nxgxS34JmKXqYx8CZAwEVoJFImUXIg==} engines: {node: '>=6.0'} peerDependencies: supports-color: '*' @@ -2470,7 +2430,7 @@ packages: resolution: {integrity: sha512-SUwdGfqdKOwxCPeVYjwSyRpJ7Z+fhpwIAtmCUdZIWZ/YP5R9WAsyuSgpLVDi9bjWoN2LXHNss/dk3urXtdQxGg==} engines: {node: '>=6'} dependencies: - type-detect: 4.0.8 + type-detect: 4.1.0 dev: true /deep-equal@2.2.3: @@ -2587,7 +2547,7 @@ packages: /dom-helpers@5.2.1: resolution: {integrity: sha512-nRCa7CK3VTrM2NmGkIy4cbK7IZlgBE/PYMn55rrXefr5xXDP0LdtfPnblFDoVdcAfslJ7or6iqAUnx0CCGIWQA==} dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 csstype: 3.1.3 dev: false @@ -2613,11 +2573,11 @@ packages: safer-buffer: 2.1.2 dev: true - /echarts@5.5.0: - resolution: {integrity: sha512-rNYnNCzqDAPCr4m/fqyUFv7fD9qIsd50S6GDFgO1DxZhncCsNsG7IfUlAlvZe5oSEQxtsjnHiUuppzccry93Xw==} + /echarts@5.5.1: + resolution: {integrity: sha512-Fce8upazaAXUVUVsjgV6mBnGuqgO+JNDlcgF79Dksy4+wgGpQB2lmYoO4TSweFg/mZITdpGHomw/cNBJZj1icA==} dependencies: tslib: 2.3.0 - zrender: 5.5.0 + zrender: 5.6.0 dev: false /ee-first@1.1.1: @@ -2930,7 +2890,7 @@ packages: ajv: 6.12.6 chalk: 4.1.2 cross-spawn: 7.0.3 - debug: 4.3.5 + debug: 4.3.6 doctrine: 3.0.0 enquirer: 2.4.1 escape-string-regexp: 4.0.0 @@ -2938,7 +2898,7 @@ packages: eslint-utils: 2.1.0 eslint-visitor-keys: 2.1.0 espree: 7.3.1 - esquery: 1.5.0 + esquery: 1.6.0 esutils: 2.0.3 
fast-deep-equal: 3.1.3 file-entry-cache: 6.0.1 @@ -2958,7 +2918,7 @@ packages: optionator: 0.9.4 progress: 2.0.3 regexpp: 3.2.0 - semver: 7.6.2 + semver: 7.6.3 strip-ansi: 6.0.1 strip-json-comments: 3.1.1 table: 6.8.2 @@ -2983,8 +2943,8 @@ packages: hasBin: true dev: true - /esquery@1.5.0: - resolution: {integrity: sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==} + /esquery@1.6.0: + resolution: {integrity: sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==} engines: {node: '>=0.10'} dependencies: estraverse: 5.3.0 @@ -3059,7 +3019,7 @@ packages: /express-urlrewrite@1.4.0: resolution: {integrity: sha512-PI5h8JuzoweS26vFizwQl6UTF25CAHSggNv0J25Dn/IKZscJHWZzPrI5z2Y2jgOzIaw2qh8l6+/jUcig23Z2SA==} dependencies: - debug: 4.3.5 + debug: 4.3.6 path-to-regexp: 1.8.0 transitivePeerDependencies: - supports-color @@ -3138,7 +3098,7 @@ packages: '@nodelib/fs.walk': 1.2.8 glob-parent: 5.1.2 merge2: 1.4.1 - micromatch: 4.0.7 + micromatch: 4.0.8 dev: true /fast-json-stable-stringify@2.1.0: @@ -3149,6 +3109,10 @@ packages: resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} dev: true + /fast-uri@3.0.1: + resolution: {integrity: sha512-MWipKbbYiYI0UC7cl8m/i/IWTqfC8YXsqjzybjddLsFjStroQzsHXkc73JutMvBiXmOvapk+axIl79ig5t55Bw==} + dev: true + /fastq@1.17.1: resolution: {integrity: sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==} dependencies: @@ -3236,8 +3200,8 @@ packages: is-callable: 1.2.7 dev: true - /foreground-child@3.2.1: - resolution: {integrity: sha512-PXUUyLqrR2XCWICfv6ukppP96sdFwWbNEnfEMt7jNsISjMsvaLNinAHNDYyvkyU+SZG2BTSbT5NjG+vZslfGTA==} + /foreground-child@3.3.0: + resolution: {integrity: sha512-Ld2g8rrAyMYFXBhEqMz8ZAHBi4J4uS1i/CxGMDnjyFWddMXLVcDp051DZfu+t7+ab7Wv6SMqpWmyFIj5UbfFvg==} engines: {node: '>=14'} dependencies: cross-spawn: 7.0.3 @@ -3378,14 +3342,13 @@ packages: resolution: {integrity: sha512-m5blUd3/OqDTWwzBBtWBPrGlAzatRywHameHeekAZyZrskYouOGdNB8T/q6JucucvJXtOuyHIn0/Yia7iDasDw==} dev: true - /glob@10.4.2: - resolution: {integrity: sha512-GwMlUF6PkPo3Gk21UxkCohOv0PLcIXVtKyLlpEI28R/cO/4eNOdmLk3CMW1wROV/WR/EsZOWAfBbBOqYvs88/w==} - engines: {node: '>=16 || 14 >=14.18'} + /glob@10.4.5: + resolution: {integrity: sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==} hasBin: true dependencies: - foreground-child: 3.2.1 - jackspeak: 3.4.0 - minimatch: 9.0.4 + foreground-child: 3.3.0 + jackspeak: 3.4.3 + minimatch: 9.0.5 minipass: 7.1.2 package-json-from-dist: 1.0.0 path-scurry: 1.11.1 @@ -3437,7 +3400,7 @@ packages: array-union: 2.1.0 dir-glob: 3.0.1 fast-glob: 3.3.2 - ignore: 5.3.1 + ignore: 5.3.2 merge2: 1.4.1 slash: 3.0.0 dev: true @@ -3551,7 +3514,7 @@ packages: /history@4.10.1: resolution: {integrity: sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==} dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 loose-envify: 1.4.0 resolve-pathname: 3.0.0 tiny-invariant: 1.3.3 @@ -3559,12 +3522,6 @@ packages: value-equal: 1.0.1 dev: false - /history@5.3.0: - resolution: {integrity: sha512-ZqaKwjjrAYUYfLG+htGaIIZ4nioX2L70ZUMIFysS3xvBsSG4x/n1V6TXV3N8ZYNuFGlDirFg32T7B6WOUPDYcQ==} - dependencies: - '@babel/runtime': 7.24.7 - dev: true - /hoist-non-react-statics@3.3.2: resolution: {integrity: sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==} 
dependencies: @@ -3602,7 +3559,7 @@ packages: engines: {node: '>= 14'} dependencies: agent-base: 7.1.1 - debug: 4.3.5 + debug: 4.3.6 transitivePeerDependencies: - supports-color dev: true @@ -3621,7 +3578,7 @@ packages: engines: {node: '>= 14'} dependencies: agent-base: 7.1.1 - debug: 4.3.5 + debug: 4.3.6 transitivePeerDependencies: - supports-color dev: true @@ -3654,8 +3611,8 @@ packages: engines: {node: '>= 4'} dev: true - /ignore@5.3.1: - resolution: {integrity: sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==} + /ignore@5.3.2: + resolution: {integrity: sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==} engines: {node: '>= 4'} dev: true @@ -3791,8 +3748,8 @@ packages: ci-info: 2.0.0 dev: true - /is-core-module@2.14.0: - resolution: {integrity: sha512-a5dFJih5ZLYlRtDc0dZWP7RiKr6xIKzmn/oAYCDvdLThadVgyJwlaoQPmRtMSpz+rk0OGAgIu+TcM9HUF0fk1A==} + /is-core-module@2.15.1: + resolution: {integrity: sha512-z0vtXSwucUJtANQWldhbtbt7BnL0vxiFjIdDLAatwhDYty2bad6s+rijD6Ri4YuYJubLzIJLUidCh09e1djEVQ==} engines: {node: '>= 0.4'} dependencies: hasown: 2.0.2 @@ -4005,9 +3962,8 @@ packages: resolution: {integrity: sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g==} dev: true - /jackspeak@3.4.0: - resolution: {integrity: sha512-JVYhQnN59LVPFCEcVa2C3CrEKYacvjRfqIQl+h8oi91aLYQVWRYbxjPcv1bUiUy/kLmQaANrYfNMCO3kuEDHfw==} - engines: {node: '>=14'} + /jackspeak@3.4.3: + resolution: {integrity: sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==} dependencies: '@isaacs/cliui': 8.0.2 optionalDependencies: @@ -4042,8 +3998,8 @@ packages: resolution: {integrity: sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg==} dev: true - /jsdom@24.1.1: - resolution: {integrity: sha512-5O1wWV99Jhq4DV7rCLIoZ/UIhyQeDR7wHVyZAHAshbrvZsLs+Xzz7gtwnlJTJDjleiTKh54F4dXrX70vJQTyJQ==} + /jsdom@24.1.3: + resolution: {integrity: sha512-MyL55p3Ut3cXbeBEG7Hcv0mVM8pp8PBNWxRqchZnSfAiES1v1mRnMeFfaHWIPULpwsYfvO+ZmMZz5tGCnjzDUQ==} engines: {node: '>=18'} peerDependencies: canvas: ^2.11.2 @@ -4211,7 +4167,7 @@ packages: image-size: 0.5.5 make-dir: 2.1.0 mime: 1.6.0 - native-request: 1.1.0 + native-request: 1.1.2 source-map: 0.6.1 /levn@0.4.1: @@ -4240,7 +4196,7 @@ packages: engines: {node: '>=14'} dependencies: mlly: 1.7.1 - pkg-types: 1.1.1 + pkg-types: 1.2.0 dev: true /locate-path@3.0.0: @@ -4308,9 +4264,8 @@ packages: engines: {node: '>=8'} dev: true - /lru-cache@10.2.2: - resolution: {integrity: sha512-9hp3Vp2/hFQUiIwKo8XCeFVnrg8Pk3TYNPIR7tJADKi5YfcF7vEaK7avFHTlSy3kOKYaJQaalfEo6YuXdceBOQ==} - engines: {node: 14 || >=16.14} + /lru-cache@10.4.3: + resolution: {integrity: sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==} dev: true /lru-cache@4.1.5: @@ -4325,10 +4280,10 @@ packages: hasBin: true dev: true - /magic-string@0.30.10: - resolution: {integrity: sha512-iIRwTIf0QKV3UAnYK4PU8uiEc4SRh5jX0mwpIwETPpHdhVM4f53RSwS/vXvN1JhGX+Cs7B8qIq3d6AH49O5fAQ==} + /magic-string@0.30.11: + resolution: {integrity: sha512-+Wri9p0QHMy+545hKww7YAu5NyzF8iomPL/RQazugQ9+Ez4Ic3mERMd8ZTX5rfK944j+560ZJi8iAwgak1Ac7A==} dependencies: - '@jridgewell/sourcemap-codec': 1.4.15 + '@jridgewell/sourcemap-codec': 1.5.0 dev: true /make-dir@1.3.0: @@ -4391,8 +4346,8 @@ packages: engines: {node: '>= 0.6'} dev: true - /micromatch@4.0.7: - resolution: {integrity: 
sha512-LPP/3KorzCwBxfeUuZmaR6bG2kdeHSbe0P2tY3FLRU4vYrjYz5hI4QZwV0njUx3jeuKe67YukQ1LSPZBKDqO/Q==} + /micromatch@4.0.8: + resolution: {integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==} engines: {node: '>=8.6'} dependencies: braces: 3.0.3 @@ -4403,6 +4358,11 @@ packages: resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==} engines: {node: '>= 0.6'} + /mime-db@1.53.0: + resolution: {integrity: sha512-oHlN/w+3MQ3rba9rqFr6V/ypF10LSkdwUysQL7GkXoTgIWeV+tcXGA852TBxH+gsh8UWoyhR1hKcoMJTuWflpg==} + engines: {node: '>= 0.6'} + dev: true + /mime-types@2.1.35: resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==} engines: {node: '>= 0.6'} @@ -4452,8 +4412,8 @@ packages: brace-expansion: 1.1.11 dev: true - /minimatch@9.0.4: - resolution: {integrity: sha512-KqWh+VchfxcMNRAJjj2tnsSJdNbHsVgnkBhTNrW7AjVo6OvLtxw8zfT9oLw1JSohlFzJ8jCoTgaoXvJ+kHt6fw==} + /minimatch@9.0.5: + resolution: {integrity: sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==} engines: {node: '>=16 || 14 >=14.17'} dependencies: brace-expansion: 2.0.1 @@ -4471,10 +4431,10 @@ packages: /mlly@1.7.1: resolution: {integrity: sha512-rrVRZRELyQzrIUAVMHxP97kv+G786pHmOKzuFII8zDYahFBS7qnHh2AlYSl1GAHhaMPCz6/oHjVMcfFYgFYHgA==} dependencies: - acorn: 8.12.0 + acorn: 8.12.1 pathe: 1.1.2 - pkg-types: 1.1.1 - ufo: 1.5.3 + pkg-types: 1.2.0 + ufo: 1.5.4 dev: true /moment@2.30.1: @@ -4563,8 +4523,8 @@ packages: hasBin: true dev: true - /native-request@1.1.0: - resolution: {integrity: sha512-uZ5rQaeRn15XmpgE0xoPL8YWqcX90VtCFglYwAgkvKM5e8fog+vePLAhHxuuv/gRkrQxIeh5U3q9sMNUrENqWw==} + /native-request@1.1.2: + resolution: {integrity: sha512-/etjwrK0J4Ebbcnt35VMWnfiUX/B04uwGJxyJInagxDqf2z5drSt/lsOvEMWGYunz1kaLZAFrV4NDAbOoDKvAQ==} requiresBuild: true optional: true @@ -4782,7 +4742,7 @@ packages: resolution: {integrity: sha512-/Eaoq+QyLSiXQ4lyYV23f14mZRQcXnxfHrN0vCai+ak9G0pp9iEQukIIZq5NccEvwRB8PUnZT0KsOoDCINS1qQ==} engines: {node: '>=18'} dependencies: - yocto-queue: 1.0.0 + yocto-queue: 1.1.1 dev: true /p-locate@3.0.0: @@ -4887,7 +4847,7 @@ packages: resolution: {integrity: sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==} engines: {node: '>=16 || 14 >=14.18'} dependencies: - lru-cache: 10.2.2 + lru-cache: 10.4.3 minipass: 7.1.2 dev: true @@ -4957,8 +4917,8 @@ packages: engines: {node: '>= 6'} dev: true - /pkg-types@1.1.1: - resolution: {integrity: sha512-ko14TjmDuQJ14zsotODv7dBlwxKhUKQEhuhmbqo1uCi9BB0Z2alo/wAXg6q1dTR5TyuqYyWhjtfe/Tsh+X28jQ==} + /pkg-types@1.2.0: + resolution: {integrity: sha512-+ifYuSSqOQ8CqP4MbZA5hDpb97n3E8SVWdJe+Wms9kj745lmd3b7EZJiqvmLwAlmRfjrI7Hi5z3kdBJ93lFNPA==} dependencies: confbox: 0.1.7 mlly: 1.7.1 @@ -4981,8 +4941,8 @@ packages: engines: {node: '>= 0.4'} dev: true - /postcss@8.4.38: - resolution: {integrity: sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==} + /postcss@8.4.41: + resolution: {integrity: sha512-TesUflQ0WKZqAvg52PWL6kHgLKP6xB6heTOdoYM0Wt2UHyxNa4K25EZZMgKns3BH1RLVbZCREPpLY0rhnNoHVQ==} engines: {node: ^10 || ^12 || >=14} dependencies: nanoid: 3.3.7 @@ -5128,7 +5088,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 dom-align: 1.12.4 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5143,7 +5103,7 @@ packages: 
react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 array-tree-filter: 2.1.0 rc-trigger: 5.3.4(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5158,7 +5118,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 react: 16.14.0 react-dom: 16.14.0(react@16.14.0) @@ -5170,7 +5130,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5185,7 +5145,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5199,7 +5159,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5212,7 +5172,7 @@ packages: react: '*' react-dom: '*' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 rc-trigger: 5.3.4(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5226,7 +5186,7 @@ packages: react: '>= 16.9.0' react-dom: '>= 16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 async-validator: 3.5.2 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5239,7 +5199,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 rc-dialog: 8.5.3(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5253,7 +5213,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5266,7 +5226,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 rc-menu: 8.10.8(react-dom@16.14.0)(react@16.14.0) rc-textarea: 0.3.7(react-dom@16.14.0)(react@16.14.0) @@ -5282,7 +5242,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 mini-store: 3.0.6(react-dom@16.14.0)(react@16.14.0) rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) @@ -5300,7 +5260,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5314,7 +5274,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5328,7 +5288,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 rc-resize-observer: 1.4.0(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5342,7 +5302,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 react: 16.14.0 
react-dom: 16.14.0(react@16.14.0) @@ -5355,10 +5315,10 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 date-fns: 2.30.0 - dayjs: 1.11.11 + dayjs: 1.11.13 moment: 2.30.1 rc-trigger: 5.3.4(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5373,7 +5333,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 react: 16.14.0 react-dom: 16.14.0(react@16.14.0) @@ -5386,7 +5346,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5399,7 +5359,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5414,13 +5374,13 @@ packages: react: '*' react-dom: '*' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) rc-overflow: 1.3.2(react-dom@16.14.0)(react@16.14.0) rc-trigger: 5.3.4(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) - rc-virtual-list: 3.14.3(react-dom@16.14.0)(react@16.14.0) + rc-virtual-list: 3.14.5(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 react-dom: 16.14.0(react@16.14.0) dev: false @@ -5432,7 +5392,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 rc-tooltip: 5.0.2(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5448,7 +5408,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5461,7 +5421,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5475,7 +5435,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 rc-resize-observer: 1.4.0(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5491,7 +5451,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 rc-dropdown: 3.2.5(react-dom@16.14.0)(react@16.14.0) rc-menu: 8.10.8(react-dom@16.14.0)(react@16.14.0) @@ -5507,7 +5467,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 rc-resize-observer: 1.4.0(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5522,7 +5482,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 rc-trigger: 5.3.4(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 react-dom: 16.14.0(react@16.14.0) @@ -5534,7 +5494,7 @@ packages: react: '*' react-dom: '*' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 rc-select: 12.1.13(react-dom@16.14.0)(react@16.14.0) rc-tree: 4.1.5(react-dom@16.14.0)(react@16.14.0) @@ -5550,11 
+5510,11 @@ packages: react: '*' react-dom: '*' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) - rc-virtual-list: 3.14.3(react-dom@16.14.0)(react@16.14.0) + rc-virtual-list: 3.14.5(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 react-dom: 16.14.0(react@16.14.0) dev: false @@ -5566,7 +5526,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 rc-align: 4.0.15(react-dom@16.14.0)(react@16.14.0) rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) @@ -5581,7 +5541,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5594,20 +5554,20 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 react: 16.14.0 react-dom: 16.14.0(react@16.14.0) react-is: 18.3.1 dev: false - /rc-virtual-list@3.14.3(react-dom@16.14.0)(react@16.14.0): - resolution: {integrity: sha512-6+6wiEhdqakNBnbRJymgMlh+90qpkgqherTRo1l1cX7mK6F9hWsazPczmP0lA+64yhC9/t+M9Dh5pjvDWimn8A==} + /rc-virtual-list@3.14.5(react-dom@16.14.0)(react@16.14.0): + resolution: {integrity: sha512-ZMOnkCLv2wUN8Jz7yI4XiSLa9THlYvf00LuMhb1JlsQCewuU7ydPuHw1rGVPhe9VZYl/5UqODtNd7QKJ2DMGfg==} engines: {node: '>=8.x'} peerDependencies: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 classnames: 2.5.1 rc-resize-observer: 1.4.0(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5660,7 +5620,7 @@ packages: peerDependencies: react: '>=15' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 history: 4.10.1 loose-envify: 1.4.0 prop-types: 15.8.1 @@ -5675,7 +5635,7 @@ packages: peerDependencies: react: '>=15' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 history: 4.10.1 hoist-non-react-statics: 3.3.2 loose-envify: 1.4.0 @@ -5693,7 +5653,7 @@ packages: react: ^16.8.0 || ^17.0.0 react-dom: ^16.8.0 || ^17.0.0 dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 '@emotion/cache': 10.0.29 '@emotion/core': 10.3.1(react@16.14.0) '@emotion/css': 10.0.27 @@ -5713,7 +5673,7 @@ packages: react: '>=16.6.0' react-dom: '>=16.6.0' dependencies: - '@babel/runtime': 7.24.7 + '@babel/runtime': 7.25.4 dom-helpers: 5.2.1 loose-envify: 1.4.0 prop-types: 15.8.1 @@ -5810,7 +5770,7 @@ packages: deprecated: request has been deprecated, see https://github.com/request/request/issues/3142 dependencies: aws-sign2: 0.7.0 - aws4: 1.13.0 + aws4: 1.13.2 caseless: 0.12.0 combined-stream: 1.0.8 extend: 3.0.2 @@ -5865,7 +5825,7 @@ packages: resolution: {integrity: sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==} hasBin: true dependencies: - is-core-module: 2.14.0 + is-core-module: 2.15.1 path-parse: 1.0.7 supports-preserve-symlinks-flag: 1.0.0 @@ -5904,29 +5864,29 @@ packages: fsevents: 2.3.3 dev: true - /rollup@4.18.0: - resolution: {integrity: sha512-QmJz14PX3rzbJCN1SG4Xe/bAAX2a6NpCP8ab2vfu2GiUr8AQcr2nCV/oEO3yneFarB67zk8ShlIyWb2LGTb3Sg==} + /rollup@4.21.1: + resolution: {integrity: sha512-ZnYyKvscThhgd3M5+Qt3pmhO4jIRR5RGzaSovB6Q7rGNrK5cUncrtLmcTTJVSdcKXyZjW8X8MB0JMSuH9bcAJg==} engines: {node: '>=18.0.0', npm: '>=8.0.0'} hasBin: true dependencies: '@types/estree': 1.0.5 
optionalDependencies: - '@rollup/rollup-android-arm-eabi': 4.18.0 - '@rollup/rollup-android-arm64': 4.18.0 - '@rollup/rollup-darwin-arm64': 4.18.0 - '@rollup/rollup-darwin-x64': 4.18.0 - '@rollup/rollup-linux-arm-gnueabihf': 4.18.0 - '@rollup/rollup-linux-arm-musleabihf': 4.18.0 - '@rollup/rollup-linux-arm64-gnu': 4.18.0 - '@rollup/rollup-linux-arm64-musl': 4.18.0 - '@rollup/rollup-linux-powerpc64le-gnu': 4.18.0 - '@rollup/rollup-linux-riscv64-gnu': 4.18.0 - '@rollup/rollup-linux-s390x-gnu': 4.18.0 - '@rollup/rollup-linux-x64-gnu': 4.18.0 - '@rollup/rollup-linux-x64-musl': 4.18.0 - '@rollup/rollup-win32-arm64-msvc': 4.18.0 - '@rollup/rollup-win32-ia32-msvc': 4.18.0 - '@rollup/rollup-win32-x64-msvc': 4.18.0 + '@rollup/rollup-android-arm-eabi': 4.21.1 + '@rollup/rollup-android-arm64': 4.21.1 + '@rollup/rollup-darwin-arm64': 4.21.1 + '@rollup/rollup-darwin-x64': 4.21.1 + '@rollup/rollup-linux-arm-gnueabihf': 4.21.1 + '@rollup/rollup-linux-arm-musleabihf': 4.21.1 + '@rollup/rollup-linux-arm64-gnu': 4.21.1 + '@rollup/rollup-linux-arm64-musl': 4.21.1 + '@rollup/rollup-linux-powerpc64le-gnu': 4.21.1 + '@rollup/rollup-linux-riscv64-gnu': 4.21.1 + '@rollup/rollup-linux-s390x-gnu': 4.21.1 + '@rollup/rollup-linux-x64-gnu': 4.21.1 + '@rollup/rollup-linux-x64-musl': 4.21.1 + '@rollup/rollup-win32-arm64-msvc': 4.21.1 + '@rollup/rollup-win32-ia32-msvc': 4.21.1 + '@rollup/rollup-win32-x64-msvc': 4.21.1 fsevents: 2.3.3 dev: true @@ -5952,7 +5912,7 @@ packages: /rxjs@7.8.1: resolution: {integrity: sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==} dependencies: - tslib: 2.6.3 + tslib: 2.7.0 dev: true /safe-array-concat@1.1.2: @@ -6025,8 +5985,8 @@ packages: hasBin: true dev: true - /semver@7.6.2: - resolution: {integrity: sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==} + /semver@7.6.3: + resolution: {integrity: sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==} engines: {node: '>=10'} hasBin: true dev: true @@ -6072,8 +6032,8 @@ packages: resolution: {integrity: sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==} dev: true - /set-cookie-parser@2.6.0: - resolution: {integrity: sha512-RVnVQxTXuerk653XfuliOxBP81Sf0+qfQE73LIYKcyMYHG94AuH0kgrQpRDuTZnSmjpysHmzxJXKNfa6PjFhyQ==} + /set-cookie-parser@2.7.0: + resolution: {integrity: sha512-lXLOiqpkUumhRdFF3k1osNXCy9akgx/dyPZ5p8qAg9seJzXr5ZrlqZuWIMuY6ejOsVLE6flJ5/h3lsn57fQ/PQ==} dev: true /set-function-length@1.2.2: @@ -6191,7 +6151,7 @@ packages: resolution: {integrity: sha512-kN9dJbvnySHULIluDHy32WHRUu3Og7B9sbY7tsFLctQkIqnMh3hErYgdMjTYuqmcXX+lK5T1lnUt3G7zNswmZA==} dependencies: spdx-expression-parse: 3.0.1 - spdx-license-ids: 3.0.18 + spdx-license-ids: 3.0.20 dev: true /spdx-exceptions@2.5.0: @@ -6202,11 +6162,11 @@ packages: resolution: {integrity: sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==} dependencies: spdx-exceptions: 2.5.0 - spdx-license-ids: 3.0.18 + spdx-license-ids: 3.0.20 dev: true - /spdx-license-ids@3.0.18: - resolution: {integrity: sha512-xxRs31BqRYHwiMzudOrpSiHtZ8i/GeionCBDSilhYRj+9gIcI8wCZTlXZKu9vZIVqViP3dcp9qE5G6AlIaD+TQ==} + /spdx-license-ids@3.0.20: + resolution: {integrity: sha512-jg25NiDV/1fLtSgEgyvVyDunvaNHbuwF9lfNV17gSmPFAlYzdfNBlLtLzXTevwkPj7DhGbmN9VnmJIgLnhvaBw==} dev: true /sprintf-js@1.0.3: @@ -6420,7 +6380,7 @@ packages: dependencies: '@jridgewell/gen-mapping': 0.3.5 commander: 4.1.1 - glob: 10.4.2 + 
glob: 10.4.5 lines-and-columns: 1.2.4 mz: 2.7.0 pirates: 4.0.6 @@ -6452,7 +6412,7 @@ packages: resolution: {integrity: sha512-w2sfv80nrAh2VCbqR5AK27wswXhqcck2AhfnNW76beQXskGZ1V12GwS//yYVa3d3fcvAip2OUnbDAjW2k3v9fA==} engines: {node: '>=10.0.0'} dependencies: - ajv: 8.16.0 + ajv: 8.17.1 lodash.truncate: 4.4.2 slice-ansi: 4.0.0 string-width: 4.2.3 @@ -6495,8 +6455,8 @@ packages: resolution: {integrity: sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==} dev: false - /tinybench@2.8.0: - resolution: {integrity: sha512-1/eK7zUnIklz4JUUlL+658n58XO2hHLQfSk1Zf2LKieUjxidN16eKFEoDEfjHc3ohofSSqK3X5yO6VGb6iW8Lw==} + /tinybench@2.9.0: + resolution: {integrity: sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==} dev: true /tinypool@0.8.4: @@ -6591,8 +6551,8 @@ packages: resolution: {integrity: sha512-N82ooyxVNm6h1riLCoyS9e3fuJ3AMG2zIZs2Gd1ATcSFjSA23Q0fzjjZeh0jbJvWVDZ0cJT8yaNNaaXHzueNjg==} dev: false - /tslib@2.6.3: - resolution: {integrity: sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==} + /tslib@2.7.0: + resolution: {integrity: sha512-gLXCKdN1/j47AiHiOkJN69hJmcbGTHI0ImLmbYLHykhgeN0jVGola9yVjFgzCUklsZQMW55o+dW7IXv3RCXDzA==} dev: true /tsutils@3.21.0(typescript@4.9.5): @@ -6622,8 +6582,8 @@ packages: prelude-ls: 1.2.1 dev: true - /type-detect@4.0.8: - resolution: {integrity: sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==} + /type-detect@4.1.0: + resolution: {integrity: sha512-Acylog8/luQ8L7il+geoSxhEkazvkslg7PSNKOX59mbB9cOveP5aq9h74Y7YU8yDpJwetzQQrfIwtf4Wp4LKcw==} engines: {node: '>=4'} dev: true @@ -6704,8 +6664,8 @@ packages: engines: {node: '>=4.2.0'} hasBin: true - /ufo@1.5.3: - resolution: {integrity: sha512-Y7HYmWaFwPUmkoQCUIAYpKqkOf+SbVj/2fJJZ4RJMCfZp0rTGwRbzQD+HghfnhKOjL9E01okqz+ncJskGYfBNw==} + /ufo@1.5.4: + resolution: {integrity: sha512-UsUk3byDzKd04EyoZ7U4DOlxQaD14JUKQl6/P7wiX4FNvUfm3XL246n9W5AmqwW5RSFJ27NAuM0iLscAOYUiGQ==} dev: true /unbox-primitive@1.0.2: @@ -6717,8 +6677,8 @@ packages: which-boxed-primitive: 1.0.2 dev: true - /undici-types@5.26.5: - resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==} + /undici-types@6.19.8: + resolution: {integrity: sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==} dev: true /unique-string@1.0.0: @@ -6836,15 +6796,16 @@ packages: hasBin: true dependencies: cac: 6.7.14 - debug: 4.3.5 + debug: 4.3.6 pathe: 1.1.2 picocolors: 1.0.1 - vite: 5.3.1(less@3.13.1) + vite: 5.4.2(less@3.13.1) transitivePeerDependencies: - '@types/node' - less - lightningcss - sass + - sass-embedded - stylus - sugarss - supports-color @@ -6856,7 +6817,7 @@ packages: peerDependencies: vite: '>2.0.0-0' dependencies: - debug: 4.3.5 + debug: 4.3.6 globrex: 0.1.2 recrawl-sync: 2.2.3 tsconfig-paths: 4.2.0 @@ -6895,14 +6856,14 @@ packages: dependencies: esbuild: 0.18.20 less: 3.13.1 - postcss: 8.4.38 + postcss: 8.4.41 rollup: 3.29.4 optionalDependencies: fsevents: 2.3.3 dev: true - /vite@5.3.1(less@3.13.1): - resolution: {integrity: sha512-XBmSKRLXLxiaPYamLv3/hnP/KXDai1NDexN0FpkTaZXTfycHvkRHoenpgl/fvuK/kPbB6xAgoyiryAhQNxYmAQ==} + /vite@5.4.2(less@3.13.1): + resolution: {integrity: sha512-dDrQTRHp5C1fTFzcSaMxjk6vdpKvT+2/mIdE07Gw2ykehT49O0z/VHS3zZ8iV/Gh8BJJKHWOe5RjaNrW5xf/GA==} engines: {node: ^18.0.0 || >=20.0.0} hasBin: true peerDependencies: @@ -6910,6 +6871,7 @@ packages: less: '*' 
lightningcss: ^1.21.0 sass: '*' + sass-embedded: '*' stylus: '*' sugarss: '*' terser: ^5.4.0 @@ -6922,6 +6884,8 @@ packages: optional: true sass: optional: true + sass-embedded: + optional: true stylus: optional: true sugarss: @@ -6931,13 +6895,13 @@ packages: dependencies: esbuild: 0.21.5 less: 3.13.1 - postcss: 8.4.38 - rollup: 4.18.0 + postcss: 8.4.41 + rollup: 4.21.1 optionalDependencies: fsevents: 2.3.3 dev: true - /vitest@1.6.0(jsdom@24.1.1)(less@3.13.1): + /vitest@1.6.0(jsdom@24.1.3)(less@3.13.1): resolution: {integrity: sha512-H5r/dN06swuFnzNFhq/dnz37bPXnq8xB2xB5JOVk8K09rUtoeNN+LHWkoQ0A/i3hvbUKKcCei9KpbxqHMLhLLA==} engines: {node: ^18.0.0 || >=20.0.0} hasBin: true @@ -6968,25 +6932,26 @@ packages: '@vitest/spy': 1.6.0 '@vitest/utils': 1.6.0 acorn-walk: 8.3.3 - chai: 4.4.1 - debug: 4.3.5 + chai: 4.5.0 + debug: 4.3.6 execa: 8.0.1 - jsdom: 24.1.1 + jsdom: 24.1.3 local-pkg: 0.5.0 - magic-string: 0.30.10 + magic-string: 0.30.11 pathe: 1.1.2 picocolors: 1.0.1 std-env: 3.7.0 strip-literal: 2.1.0 - tinybench: 2.8.0 + tinybench: 2.9.0 tinypool: 0.8.4 - vite: 5.3.1(less@3.13.1) + vite: 5.4.2(less@3.13.1) vite-node: 1.6.0(less@3.13.1) - why-is-node-running: 2.2.2 + why-is-node-running: 2.3.0 transitivePeerDependencies: - less - lightningcss - sass + - sass-embedded - stylus - sugarss - supports-color @@ -7106,8 +7071,8 @@ packages: isexe: 2.0.0 dev: true - /why-is-node-running@2.2.2: - resolution: {integrity: sha512-6tSwToZxTOcotxHeA+qGCq1mVzKR3CwcJGmVcY+QE8SHy6TnpFnh8PAvPNHYr7EcuVeG0QSMxtYCuO1ta/G/oA==} + /why-is-node-running@2.3.0: + resolution: {integrity: sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==} engines: {node: '>=8'} hasBin: true dependencies: @@ -7261,13 +7226,13 @@ packages: yargs-parser: 21.1.1 dev: true - /yocto-queue@1.0.0: - resolution: {integrity: sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==} + /yocto-queue@1.1.1: + resolution: {integrity: sha512-b4JR1PFR10y1mKjhHY9LaGo6tmrgjit7hxVIeAmyMw3jegXR4dhYqLaQF5zMXZxY7tLpMyJeLjr1C4rLmkVe8g==} engines: {node: '>=12.20'} dev: true - /zrender@5.5.0: - resolution: {integrity: sha512-O3MilSi/9mwoovx77m6ROZM7sXShR/O/JIanvzTwjN3FORfLSr81PsUGd7jlaYOeds9d8tw82oP44+3YucVo+w==} + /zrender@5.6.0: + resolution: {integrity: sha512-uzgraf4njmmHAbEUxMJ8Oxg+P3fT04O+9p7gY+wJRVxo8Ge+KmYv0WJev945EH4wFuc4OY2NLXz46FZrWS9xJg==} dependencies: tslib: 2.3.0 dev: false diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.tsx index 0ad6aa3f174c..78954ebb5a54 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/app.tsx @@ -20,6 +20,7 @@ import React, { Suspense } from 'react'; import { Switch as AntDSwitch, Layout } from 'antd'; import NavBar from './components/navBar/navBar'; +import NavBarV2 from '@/v2/components/navBar/navBar'; import Breadcrumbs from './components/breadcrumbs/breadcrumbs'; import { HashRouter as Router, Switch, Route, Redirect } from 'react-router-dom'; import { routes } from '@/routes'; @@ -61,7 +62,11 @@ class App extends React.Component, IAppState> { return ( - + { + (enableNewUI) + ? + : + }

diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/navBar/navBar.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/navBar/navBar.less index 28978baaf97e..8e99972d1b44 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/navBar/navBar.less +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/navBar/navBar.less @@ -23,7 +23,7 @@ font-size: 20px; font-weight: 500; padding: 10px; - background-color: #002040; + background-color: #142329; .logo-text { margin-left: 10px; } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/utils/axiosRequestHelper.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/utils/axiosRequestHelper.tsx index 41774088c503..8fbe403dc375 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/utils/axiosRequestHelper.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/utils/axiosRequestHelper.tsx @@ -20,7 +20,7 @@ import axios, { AxiosResponse } from 'axios'; export const AxiosGetHelper = ( url: string, - controller: AbortController, + controller: AbortController | undefined, message: string = '', params: any = {}, ): { request: Promise>; controller: AbortController } => { @@ -50,7 +50,7 @@ export const AxiosPutHelper = ( export const PromiseAllSettledGetHelper = ( urls: string[], - controller: AbortController, + controller: AbortController | undefined, message: string = '' ): { requests: Promise>[]>; controller: AbortController } => { diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.less new file mode 100644 index 000000000000..09ec283d555e --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.less @@ -0,0 +1,65 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +.logo-v2 { + color: #FFFFFF; + font-size: 18px; + font-weight: 500; + padding: 20px; + background-color: #142329; + .logo-text-v2 { + margin-left: 10px; + } +} + +.ant-layout-sider-collapsed { + .logo-v2 { + padding: 10px; + + .logo-text-v2 { + display: none; + } + } + .ant-layout-sider-trigger { + background: #142329 !important; + text-align: center !important; + padding-left: 20px !important; + } +} + +.ant-layout-sider { + background: #142329 !important; + + .ant-menu-dark { + background: #142329 !important; + + .ant-menu-item-selected { + span { + color: #4DCF4C !important; + } + background: #224452 !important; + color: #4DCF4C !important; + } + } + + .ant-layout-sider-trigger { + background: #142329 !important; + text-align: unset !important; + padding-left: 25px; + } +} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.tsx new file mode 100644 index 000000000000..3da4104634c8 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/navBar/navBar.tsx @@ -0,0 +1,180 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import React, { useState, useEffect, useRef } from 'react'; +import axios, { AxiosResponse } from 'axios'; +import { Layout, Menu, Spin } from 'antd'; +import { + BarChartOutlined, + ClusterOutlined, + ContainerOutlined, + DashboardOutlined, + DatabaseOutlined, + DeploymentUnitOutlined, + FolderOpenOutlined, + InboxOutlined, + LayoutOutlined, + PieChartOutlined +} from '@ant-design/icons'; +import { useLocation, Link } from 'react-router-dom'; + + +import logo from '@/logo.png'; +import { showDataFetchError } from '@/utils/common'; +import { AxiosGetHelper, cancelRequests } from '@/utils/axiosRequestHelper'; + +import './navBar.less'; + + +// ------------- Types -------------- // +type NavBarProps = { + collapsed: boolean; + onCollapse: (arg0: boolean) => void; +} + +const NavBar: React.FC = ({ + collapsed = false, + onCollapse = () => { } +}) => { + const [isHeatmapEnabled, setIsHeatmapEnabled] = useState(false); + const cancelDisabledFeatureSignal = useRef(); + const location = useLocation(); + + const fetchDisabledFeatures = async () => { + const disabledfeaturesEndpoint = `/api/v1/features/disabledFeatures`; + const { request, controller } = AxiosGetHelper( + disabledfeaturesEndpoint, + cancelDisabledFeatureSignal.current + ) + cancelDisabledFeatureSignal.current = controller; + try { + const response: AxiosResponse = await request; + const heatmapDisabled = response?.data?.includes('HEATMAP') + setIsHeatmapEnabled(!heatmapDisabled); + } catch (error: unknown) { + showDataFetchError((error as Error).toString()) + } + } + + + useEffect(() => { + fetchDisabledFeatures(); + // Component will unmount + return (() => { + cancelRequests([cancelDisabledFeatureSignal.current!]) + }) + }, []) + + const menuItems = [( + }> + Overview + + + ), ( + }> + Volumes + + + ), ( + }> + Buckets + + + ), ( + }> + Datanodes + + + ), ( + }> + Pipelines + + + ), ( + }> + Containers + + + ), ( + }> + }> + Insights + + + }> + OM DB Insights + + + + ), ( + }> + Disk Usage + + + ), ( + isHeatmapEnabled && + }> + Heatmap + + + )] + return ( + +
+ Ozone Recon Logo + Ozone Recon +
+ + {...menuItems} + +
+ ); +} + +export default NavBar; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/overview/overview.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/overview/overview.tsx index dc511b62ca30..0394c8ac511c 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/overview/overview.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/overview/overview.tsx @@ -16,7 +16,7 @@ * limitations under the License. */ -import React, { useEffect, useState } from 'react'; +import React, { useEffect, useRef, useState } from 'react'; import moment from 'moment'; import filesize from 'filesize'; import axios, { CanceledError } from 'axios'; @@ -105,8 +105,8 @@ const getSummaryTableValue = ( const Overview: React.FC<{}> = () => { - let cancelOverviewSignal: AbortController; - let cancelOMDBSyncSignal: AbortController; + const cancelOverviewSignal = useRef(); + const cancelOMDBSyncSignal = useRef(); const [state, setState] = useState({ loading: false, @@ -147,8 +147,8 @@ const Overview: React.FC<{}> = () => { // Component will Un-mount autoReloadHelper.stopPolling(); cancelRequests([ - cancelOMDBSyncSignal, - cancelOverviewSignal + cancelOMDBSyncSignal.current!, + cancelOverviewSignal.current! ]); }) }, []) @@ -161,8 +161,8 @@ const Overview: React.FC<{}> = () => { // Cancel any previous pending requests cancelRequests([ - cancelOMDBSyncSignal, - cancelOverviewSignal + cancelOMDBSyncSignal.current!, + cancelOverviewSignal.current! ]); const { requests, controller } = PromiseAllSettledGetHelper([ @@ -170,8 +170,8 @@ const Overview: React.FC<{}> = () => { '/api/v1/task/status', '/api/v1/keys/open/summary', '/api/v1/keys/deletePending/summary' - ], cancelOverviewSignal); - cancelOverviewSignal = controller; + ], cancelOverviewSignal.current); + cancelOverviewSignal.current = controller; requests.then(axios.spread(( clusterStateResponse: Awaited>, @@ -264,10 +264,10 @@ const Overview: React.FC<{}> = () => { const { request, controller } = AxiosGetHelper( '/api/v1/triggerdbsync/om', - cancelOMDBSyncSignal, + cancelOMDBSyncSignal.current, 'OM-DB Sync request cancelled because data was updated' ); - cancelOMDBSyncSignal = controller; + cancelOMDBSyncSignal.current = controller; request.then(omStatusResponse => { const omStatus = omStatusResponse.data; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/volumes/volumes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/volumes/volumes.tsx index a5918ac6ce6c..6c323fc94990 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/volumes/volumes.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/volumes/volumes.tsx @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -import React, { useEffect, useState } from 'react'; +import React, { useEffect, useRef, useState } from 'react'; import moment from 'moment'; import { Table } from 'antd'; import { Link } from 'react-router-dom'; @@ -35,7 +35,7 @@ import Search from '@/v2/components/search/search'; import { byteToSize, showDataFetchError } from '@/utils/common'; import { AutoReloadHelper } from '@/utils/autoReloadHelper'; -import { AxiosGetHelper } from "@/utils/axiosRequestHelper"; +import { AxiosGetHelper, cancelRequests } from "@/utils/axiosRequestHelper"; import { useDebounce } from '@/v2/hooks/debounce.hook'; import { @@ -70,7 +70,7 @@ const LIMIT_OPTIONS: Option[] = [ const Volumes: React.FC<{}> = () => { - let cancelSignal: AbortController; + const cancelSignal = useRef(); const COLUMNS: ColumnsType = [ { @@ -181,15 +181,17 @@ const Volumes: React.FC<{}> = () => { const loadData = () => { setLoading(true); + // Cancel any previous pending requests + cancelRequests([cancelSignal.current!]); const { request, controller } = AxiosGetHelper( '/api/v1/volumes', - cancelSignal, + cancelSignal.current, "", { limit: selectedLimit.value } ); - cancelSignal = controller; + cancelSignal.current = controller; request.then(response => { const volumesResponse: VolumesResponse = response.data; const volumes: Volume[] = volumesResponse.volumes; @@ -228,7 +230,7 @@ const Volumes: React.FC<{}> = () => { // Component will unmount return (() => { autoReloadHelper.stopPolling(); - cancelSignal && cancelSignal.abort(); + cancelRequests([cancelSignal.current!]); }) }, []); From 2d372f6abdf7d14500935c64014551f27f222d6e Mon Sep 17 00:00:00 2001 From: Devesh Kumar Singh Date: Thu, 29 Aug 2024 21:02:00 +0530 Subject: [PATCH 023/106] HDDS-11339. Let PrometheusServlet rely on periodically published metrics (#7092) --- .../org/apache/hadoop/hdds/server/http/PrometheusServlet.java | 3 --- 1 file changed, 3 deletions(-) diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/PrometheusServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/PrometheusServlet.java index 0d01aa43b421..535a5e6c8e9b 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/PrometheusServlet.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/PrometheusServlet.java @@ -24,8 +24,6 @@ import java.io.IOException; import java.io.PrintWriter; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; - import io.prometheus.client.CollectorRegistry; import io.prometheus.client.exporter.common.TextFormat; @@ -58,7 +56,6 @@ protected void doGet(HttpServletRequest req, HttpServletResponse resp) return; } } - DefaultMetricsSystem.instance().publishMetricsNow(); PrintWriter writer = resp.getWriter(); getPrometheusSink().writeMetrics(writer); writer.write("\n\n#Dropwizard metrics\n\n"); From cc4e026d59e58273c412de15e0d4e6d5715848a3 Mon Sep 17 00:00:00 2001 From: jianghuazhu <740087514@qq.com> Date: Thu, 29 Aug 2024 23:48:56 +0800 Subject: [PATCH 024/106] HDDS-11304. 
Make up for the missing functionality in CommandDispatcher (#7062) --- .../CloseContainerCommandHandler.java | 15 ++++- .../commandhandler/CommandDispatcher.java | 17 +++--- .../DeleteBlocksCommandHandler.java | 4 +- .../DeleteContainerCommandHandler.java | 17 ++++-- .../TestCloseContainerCommandHandler.java | 27 +++++++++ .../TestDeleteContainerCommandHandler.java | 59 ++++++++++++++++--- 6 files changed, 113 insertions(+), 26 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java index 8533f7384d41..bc703ac6a552 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java @@ -18,7 +18,6 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutorService; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; @@ -58,11 +57,11 @@ public class CloseContainerCommandHandler implements CommandHandler { private final AtomicLong invocationCount = new AtomicLong(0); private final AtomicInteger queuedCount = new AtomicInteger(0); - private final ExecutorService executor; + private final ThreadPoolExecutor executor; private long totalTime; /** - * Constructs a ContainerReport handler. + * Constructs a close container command handler. */ public CloseContainerCommandHandler( int threadPoolSize, int queueSize, String threadNamePrefix) { @@ -220,4 +219,14 @@ public long getTotalRunTime() { public int getQueuedCount() { return queuedCount.get(); } + + @Override + public int getThreadPoolMaxPoolSize() { + return executor.getMaximumPoolSize(); + } + + @Override + public int getThreadPoolActivePoolSize() { + return executor.getActiveCount(); + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java index 9035b79c6709..c3f8da74c7a8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java @@ -56,11 +56,6 @@ public final class CommandDispatcher { private CommandDispatcher(OzoneContainer container, SCMConnectionManager connectionManager, StateContext context, CommandHandler... 
handlers) { - Preconditions.checkNotNull(context); - Preconditions.checkNotNull(handlers); - Preconditions.checkArgument(handlers.length > 0); - Preconditions.checkNotNull(container); - Preconditions.checkNotNull(connectionManager); this.context = context; this.container = container; this.connectionManager = connectionManager; @@ -77,6 +72,7 @@ private CommandDispatcher(OzoneContainer container, SCMConnectionManager commandHandlerMetrics = CommandHandlerMetrics.create(handlerMap); } + @VisibleForTesting public CommandHandler getCloseContainerHandler() { return handlerMap.get(Type.closeContainerCommand); } @@ -201,11 +197,12 @@ public Builder setContext(StateContext stateContext) { * @return Command Dispatcher. */ public CommandDispatcher build() { - Preconditions.checkNotNull(this.connectionManager, "Missing connection" + - " manager."); - Preconditions.checkNotNull(this.container, "Missing container."); - Preconditions.checkNotNull(this.context, "Missing context."); - Preconditions.checkArgument(this.handlerList.size() > 0); + Preconditions.checkNotNull(this.connectionManager, + "Missing scm connection manager."); + Preconditions.checkNotNull(this.container, "Missing ozone container."); + Preconditions.checkNotNull(this.context, "Missing state context."); + Preconditions.checkArgument(this.handlerList.size() > 0, + "The number of command handlers must be greater than 0."); return new CommandDispatcher(this.container, this.connectionManager, this.context, handlerList.toArray( new CommandHandler[handlerList.size()])); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java index 747749066e3d..bd7431c61452 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java @@ -168,12 +168,12 @@ public int getQueuedCount() { @Override public int getThreadPoolMaxPoolSize() { - return ((ThreadPoolExecutor)executor).getMaximumPoolSize(); + return executor.getMaximumPoolSize(); } @Override public int getThreadPoolActivePoolSize() { - return ((ThreadPoolExecutor)executor).getActiveCount(); + return executor.getActiveCount(); } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java index ead81c32e5b2..b76e306e1c07 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java @@ -36,7 +36,6 @@ import java.io.IOException; import java.time.Clock; import java.util.OptionalLong; -import java.util.concurrent.ExecutorService; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -53,7 +52,7 @@ public class DeleteContainerCommandHandler 
implements CommandHandler { private final AtomicInteger invocationCount = new AtomicInteger(0); private final AtomicInteger timeoutCount = new AtomicInteger(0); private final AtomicLong totalTime = new AtomicLong(0); - private final ExecutorService executor; + private final ThreadPoolExecutor executor; private final Clock clock; private int maxQueueSize; @@ -70,7 +69,7 @@ public DeleteContainerCommandHandler( } protected DeleteContainerCommandHandler(Clock clock, - ExecutorService executor, int queueSize) { + ThreadPoolExecutor executor, int queueSize) { this.executor = executor; this.clock = clock; maxQueueSize = queueSize; @@ -131,7 +130,7 @@ private void handleInternal(SCMCommand command, StateContext context, @Override public int getQueuedCount() { - return ((ThreadPoolExecutor)executor).getQueue().size(); + return executor.getQueue().size(); } @Override @@ -160,6 +159,16 @@ public long getTotalRunTime() { return totalTime.get(); } + @Override + public int getThreadPoolMaxPoolSize() { + return executor.getMaximumPoolSize(); + } + + @Override + public int getThreadPoolActivePoolSize() { + return executor.getActiveCount(); + } + @Override public void stop() { try { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java index 219645c8edcc..a3b60aa36dab 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java @@ -35,6 +35,7 @@ import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; import org.apache.ozone.test.GenericTestUtils; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.util.UUID; @@ -43,6 +44,8 @@ import static org.apache.hadoop.ozone.OzoneConsts.GB; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.any; import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; @@ -292,4 +295,28 @@ private void waitTillFinishExecution( GenericTestUtils.waitFor(() -> closeHandler.getQueuedCount() <= 0, 10, 3000); } + + @Test + public void testThreadPoolPoolSize() { + assertEquals(1, subject.getThreadPoolMaxPoolSize()); + assertEquals(0, subject.getThreadPoolActivePoolSize()); + + CloseContainerCommandHandler closeContainerCommandHandler = + new CloseContainerCommandHandler(10, 10, ""); + closeContainerCommandHandler.handle(new CloseContainerCommand( + CONTAINER_ID + 1, PipelineID.randomId()), + ozoneContainer, context, null); + closeContainerCommandHandler.handle(new CloseContainerCommand( + CONTAINER_ID + 2, PipelineID.randomId()), + ozoneContainer, context, null); + closeContainerCommandHandler.handle(new CloseContainerCommand( + CONTAINER_ID + 3, PipelineID.randomId()), + ozoneContainer, context, null); + closeContainerCommandHandler.handle(new CloseContainerCommand( + CONTAINER_ID + 4, PipelineID.randomId()), + ozoneContainer, 
context, null); + assertEquals(10, closeContainerCommandHandler.getThreadPoolMaxPoolSize()); + assertTrue(closeContainerCommandHandler.getThreadPoolActivePoolSize() > 0); + } + } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerCommandHandler.java index 49c34828fbd6..5ee31b97fd64 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerCommandHandler.java @@ -19,6 +19,14 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.LinkedBlockingQueue; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; @@ -32,7 +40,6 @@ import java.time.ZoneId; import java.util.OptionalLong; -import static com.google.common.util.concurrent.MoreExecutors.newDirectExecutorService; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -63,8 +70,14 @@ public void setup() { } @Test - public void testExpiredCommandsAreNotProcessed() throws IOException { - DeleteContainerCommandHandler handler = createSubject(clock, 1000); + public void testExpiredCommandsAreNotProcessed() + throws IOException, InterruptedException { + CountDownLatch latch1 = new CountDownLatch(1); + ThreadFactory threadFactory = new ThreadFactoryBuilder().build(); + ThreadPoolWithLockExecutor executor = new ThreadPoolWithLockExecutor( + threadFactory, latch1); + DeleteContainerCommandHandler handler = new DeleteContainerCommandHandler( + clock, executor, 100); DeleteContainerCommand command1 = new DeleteContainerCommand(1L); command1.setDeadline(clock.millis() + 10000); @@ -75,9 +88,14 @@ public void testExpiredCommandsAreNotProcessed() throws IOException { clock.fastForward(15000); handler.handle(command1, ozoneContainer, null, null); + latch1.await(); assertEquals(1, handler.getTimeoutCount()); + CountDownLatch latch2 = new CountDownLatch(2); + executor.setLatch(latch2); handler.handle(command2, ozoneContainer, null, null); handler.handle(command3, ozoneContainer, null, null); + latch2.await(); + assertEquals(1, handler.getTimeoutCount()); assertEquals(3, handler.getInvocationCount()); verify(controller, times(0)) @@ -89,7 +107,8 @@ public void testExpiredCommandsAreNotProcessed() throws IOException { } @Test - public void testCommandForCurrentTermIsExecuted() throws IOException { + public void testCommandForCurrentTermIsExecuted() + throws IOException, InterruptedException { // GIVEN DeleteContainerCommand command = new DeleteContainerCommand(1L); command.setTerm(1); @@ -97,10 +116,17 @@ public void testCommandForCurrentTermIsExecuted() throws IOException { 
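      // With this patch the handler submits work to a real single-thread
      // ThreadPoolExecutor instead of a direct executor, so handle() returns
      // before the delete ever reaches the mocked ContainerController. The
      // ThreadPoolWithLockExecutor declared at the bottom of this file counts
      // its CountDownLatch down in afterExecute(), and latch.await() below
      // blocks until the queued command has finished, so the verification
      // step does not race the executor thread.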
when(context.getTermOfLeaderSCM()) .thenReturn(OptionalLong.of(command.getTerm())); - DeleteContainerCommandHandler subject = createSubject(); + TestClock testClock = new TestClock(Instant.now(), ZoneId.systemDefault()); + CountDownLatch latch = new CountDownLatch(1); + ThreadFactory threadFactory = new ThreadFactoryBuilder().build(); + ThreadPoolWithLockExecutor executor = new ThreadPoolWithLockExecutor( + threadFactory, latch); + DeleteContainerCommandHandler subject = new DeleteContainerCommandHandler( + testClock, executor, 100); // WHEN subject.handle(command, ozoneContainer, context, null); + latch.await(); // THEN verify(controller, times(1)) @@ -163,8 +189,10 @@ private static DeleteContainerCommandHandler createSubject() { private static DeleteContainerCommandHandler createSubject( TestClock clock, int queueSize) { - return new DeleteContainerCommandHandler(clock, - newDirectExecutorService(), queueSize); + ThreadFactory threadFactory = new ThreadFactoryBuilder().build(); + ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors. + newFixedThreadPool(1, threadFactory); + return new DeleteContainerCommandHandler(clock, executor, queueSize); } private static DeleteContainerCommandHandler createSubjectWithPoolSize( @@ -172,4 +200,21 @@ private static DeleteContainerCommandHandler createSubjectWithPoolSize( return new DeleteContainerCommandHandler(1, clock, queueSize, ""); } + static class ThreadPoolWithLockExecutor extends ThreadPoolExecutor { + private CountDownLatch countDownLatch; + ThreadPoolWithLockExecutor(ThreadFactory threadFactory, CountDownLatch latch) { + super(1, 1, 0, TimeUnit.MILLISECONDS, + new LinkedBlockingQueue(), threadFactory); + this.countDownLatch = latch; + } + + void setLatch(CountDownLatch latch) { + this.countDownLatch = latch; + } + + @Override + protected void afterExecute(Runnable r, Throwable t) { + countDownLatch.countDown(); + } + } } From 47564bb47df6a52b5c86b5ce668f25a5a8922fe9 Mon Sep 17 00:00:00 2001 From: Chung En Lee Date: Fri, 30 Aug 2024 02:00:16 +0800 Subject: [PATCH 025/106] HDDS-11359. 
Intermittent timeout in TestPipelineManagerMXBean#testPipelineInfo (#7132) --- .../hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java index 75d860d951be..4a9efceeb7b8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.ozone.test.GenericTestUtils; -import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -62,16 +61,15 @@ public void init() * * @throws Exception */ - @Flaky("HDDS-11359") @Test public void testPipelineInfo() throws Exception { ObjectName bean = new ObjectName( "Hadoop:service=SCMPipelineManager,name=SCMPipelineManagerInfo"); - Map pipelineStateCount = cluster - .getStorageContainerManager().getPipelineManager().getPipelineInfo(); GenericTestUtils.waitFor(() -> { try { + Map pipelineStateCount = cluster + .getStorageContainerManager().getPipelineManager().getPipelineInfo(); final TabularData data = (TabularData) mbs.getAttribute( bean, "PipelineInfo"); for (Map.Entry entry : pipelineStateCount.entrySet()) { From 5992837af4d01c694085d59d3525eadd7f255456 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 1 Sep 2024 06:10:51 +0200 Subject: [PATCH 026/106] HDDS-11399. Bump maven-deploy-plugin to 3.1.3 (#7143) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index e8872fd71300..e03d1078971c 100644 --- a/pom.xml +++ b/pom.xml @@ -268,7 +268,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 3.7.0 3.7.1 0.16.1 - 3.1.2 + 3.1.3 3.6.0 3.7.1 4.2.2 From a8e3ea97c72716b55f474499a32fa0a6f422394c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 1 Sep 2024 07:23:10 +0200 Subject: [PATCH 027/106] HDDS-11397. Bump Jersey2 to 2.45 (#7141) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index e03d1078971c..dc55bb2f9ee6 100644 --- a/pom.xml +++ b/pom.xml @@ -156,7 +156,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.19.4 - 2.44 + 2.45 1.9.13 From b5e1a8b1711b196c20a7c8e6d5d70c9a5256dacc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 1 Sep 2024 08:33:25 +0200 Subject: [PATCH 028/106] HDDS-11398. Bump commons-compress to 1.27.1 (#7142) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index dc55bb2f9ee6..821cd911a202 100644 --- a/pom.xml +++ b/pom.xml @@ -113,7 +113,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.8.0 1.17.0 3.2.2 - 1.27.0 + 1.27.1 2.10.1 1.5.6-4 1.4.0 From 814f78f9ff3f7c1cbb369a01baed25745cfd0030 Mon Sep 17 00:00:00 2001 From: Istvan Fajth Date: Sun, 1 Sep 2024 22:57:25 +0200 Subject: [PATCH 029/106] HDDS-11392. 
ChecksumByteBufferImpl's static initializer fails with java 17+ (#7135) --- .../java/org/apache/hadoop/hdds/JavaUtils.java | 14 +++++++++++++- .../ozone/common/ChecksumByteBufferImpl.java | 14 ++++++++------ 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/JavaUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/JavaUtils.java index 63c29ba7c912..804e6552488c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/JavaUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/JavaUtils.java @@ -30,12 +30,24 @@ public final class JavaUtils { * is equal or greater than the parameter. * * @param version 8, 9, 10 etc. - * @return comparison with system property, always true for 8 + * @return comparison with system property, always true for any int up to 8 */ public static boolean isJavaVersionAtLeast(int version) { return JAVA_SPEC_VER >= version; } + /** + * Query to see if major version of Java specification of the system + * is equal or less than the parameter. + * + * @param version 8, 9, 10 etc. + * @return comparison with system property + */ + public static boolean isJavaVersionAtMost(int version) { + return JAVA_SPEC_VER <= version; + } + + /** * Private constructor. */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java index 1d596bf70077..a52359783270 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java @@ -44,12 +44,14 @@ public class ChecksumByteBufferImpl implements ChecksumByteBuffer { static { Field f = null; - try { - f = ByteBuffer.class - .getDeclaredField("isReadOnly"); - f.setAccessible(true); - } catch (NoSuchFieldException e) { - LOG.error("No isReadOnly field in ByteBuffer", e); + if (JavaUtils.isJavaVersionAtMost(8)) { + try { + f = ByteBuffer.class + .getDeclaredField("isReadOnly"); + f.setAccessible(true); + } catch (NoSuchFieldException e) { + LOG.error("No isReadOnly field in ByteBuffer", e); + } } IS_READY_ONLY_FIELD = f; From 877504aee1a5a23eeaa097e159b033018af66c86 Mon Sep 17 00:00:00 2001 From: Abhishek Pal <43001336+devabhishekpal@users.noreply.github.com> Date: Tue, 3 Sep 2024 17:38:26 +0530 Subject: [PATCH 030/106] HDDS-11156. 
Improve Buckets page UI (#7100) --- .../recon/ozone-recon-web/pnpm-lock.yaml | 134 ++--- .../src/v2/components/search/search.tsx | 4 + .../src/v2/components/select/multiSelect.tsx | 56 +- .../src/v2/components/select/singleSelect.tsx | 4 +- .../src/v2/pages/buckets/buckets.less | 41 ++ .../src/v2/pages/buckets/buckets.tsx | 563 ++++++++++++++++++ .../src/v2/pages/volumes/volumes.tsx | 8 +- .../ozone-recon-web/src/v2/routes-v2.tsx | 5 + .../src/v2/types/bucket.types.ts | 20 +- 9 files changed, 742 insertions(+), 93 deletions(-) create mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.less create mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.tsx diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml index ebbc4e2219da..3c472d5f7903 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml @@ -153,7 +153,7 @@ packages: dependencies: '@ant-design/colors': 6.0.0 '@ant-design/icons-svg': 4.4.2 - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 lodash: 4.17.21 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -166,7 +166,7 @@ packages: peerDependencies: react: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 json2mq: 0.2.0 lodash: 4.17.21 @@ -187,11 +187,11 @@ packages: '@babel/highlight': 7.24.7 picocolors: 1.0.1 - /@babel/generator@7.25.5: - resolution: {integrity: sha512-abd43wyLfbWoxC6ahM8xTkqLpGB2iWBVyuKC9/srhFunCd1SDNrV1s72bBpK4hLj8KLzHBBcOblvLQZBNw9r3w==} + /@babel/generator@7.25.6: + resolution: {integrity: sha512-VPC82gr1seXOpkjAAKoLhP50vx4vGNlF4msF64dSFq1P8RfB+QAuJWGHPXXPc8QyfVWwwB/TNNU4+ayZmHNbZw==} engines: {node: '>=6.9.0'} dependencies: - '@babel/types': 7.25.4 + '@babel/types': 7.25.6 '@jridgewell/gen-mapping': 0.3.5 '@jridgewell/trace-mapping': 0.3.25 jsesc: 2.5.2 @@ -201,8 +201,8 @@ packages: resolution: {integrity: sha512-8AyH3C+74cgCVVXow/myrynrAGv+nTVg5vKu2nZph9x7RcRwzmh0VFallJuFTZ9mx6u4eSdXZfcOzSqTUm0HCA==} engines: {node: '>=6.9.0'} dependencies: - '@babel/traverse': 7.25.4 - '@babel/types': 7.25.4 + '@babel/traverse': 7.25.6 + '@babel/types': 7.25.6 transitivePeerDependencies: - supports-color dev: false @@ -225,16 +225,16 @@ packages: js-tokens: 4.0.0 picocolors: 1.0.1 - /@babel/parser@7.25.4: - resolution: {integrity: sha512-nq+eWrOgdtu3jG5Os4TQP3x3cLA8hR8TvJNjD8vnPa20WGycimcparWnLK4jJhElTK6SDyuJo1weMKO/5LpmLA==} + /@babel/parser@7.25.6: + resolution: {integrity: sha512-trGdfBdbD0l1ZPmcJ83eNxB9rbEax4ALFTF7fN386TMYbeCQbyme5cOEXQhbGXKebwGaB/J52w1mrklMcbgy6Q==} engines: {node: '>=6.0.0'} hasBin: true dependencies: - '@babel/types': 7.25.4 + '@babel/types': 7.25.6 dev: false - /@babel/runtime@7.25.4: - resolution: {integrity: sha512-DSgLeL/FNcpXuzav5wfYvHCGvynXkJbn3Zvc3823AEe9nPwW9IK4UoCSS5yGymmQzN0pCPvivtgS6/8U2kkm1w==} + /@babel/runtime@7.25.6: + resolution: {integrity: sha512-VBj9MYyDb9tuLq7yzqjgzt6Q+IBQLrGZfdjOekyEirZPHxXWoTSGUTMrpsfi58Up73d13NfYLv8HT9vmznjzhQ==} engines: {node: '>=6.9.0'} dependencies: regenerator-runtime: 0.14.1 @@ -244,27 +244,27 @@ packages: engines: {node: '>=6.9.0'} dependencies: '@babel/code-frame': 7.24.7 - '@babel/parser': 7.25.4 - '@babel/types': 7.25.4 + '@babel/parser': 7.25.6 + '@babel/types': 7.25.6 
dev: false - /@babel/traverse@7.25.4: - resolution: {integrity: sha512-VJ4XsrD+nOvlXyLzmLzUs/0qjFS4sK30te5yEFlvbbUNEgKaVb2BHZUpAL+ttLPQAHNrsI3zZisbfha5Cvr8vg==} + /@babel/traverse@7.25.6: + resolution: {integrity: sha512-9Vrcx5ZW6UwK5tvqsj0nGpp/XzqthkT0dqIc9g1AdtygFToNtTF67XzYS//dm+SAK9cp3B9R4ZO/46p63SCjlQ==} engines: {node: '>=6.9.0'} dependencies: '@babel/code-frame': 7.24.7 - '@babel/generator': 7.25.5 - '@babel/parser': 7.25.4 + '@babel/generator': 7.25.6 + '@babel/parser': 7.25.6 '@babel/template': 7.25.0 - '@babel/types': 7.25.4 + '@babel/types': 7.25.6 debug: 4.3.6 globals: 11.12.0 transitivePeerDependencies: - supports-color dev: false - /@babel/types@7.25.4: - resolution: {integrity: sha512-zQ1ijeeCXVEh+aNL0RlmkPkG8HUiDcU2pzQQFjtbntgAczRASFzj4H+6+bV+dy1ntKR14I/DypeuRG1uma98iQ==} + /@babel/types@7.25.6: + resolution: {integrity: sha512-/l42B1qxpG6RdfYf343Uw1vmDjeNhneUXtzhojE7pDgfpEypmRhI6j1kr17XCVv4Cgl9HdAiQY2x0GwKm7rWCw==} engines: {node: '>=6.9.0'} dependencies: '@babel/helper-string-parser': 7.24.8 @@ -295,7 +295,7 @@ packages: peerDependencies: react: '>=16.3.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 '@emotion/cache': 10.0.29 '@emotion/css': 10.0.27 '@emotion/serialize': 0.11.16 @@ -1186,7 +1186,7 @@ packages: engines: {node: '>=12'} dependencies: '@babel/code-frame': 7.24.7 - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 '@types/aria-query': 5.0.4 aria-query: 5.1.3 chalk: 4.1.2 @@ -1215,7 +1215,7 @@ packages: react: <18.0.0 react-dom: <18.0.0 dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 '@testing-library/dom': 8.20.1 '@types/react-dom': 16.8.4 react: 16.14.0 @@ -1674,7 +1674,7 @@ packages: '@ant-design/colors': 5.1.1 '@ant-design/icons': 4.8.3(react-dom@16.14.0)(react@16.14.0) '@ant-design/react-slick': 0.28.4(react@16.14.0) - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 array-tree-filter: 2.1.0 classnames: 2.5.1 copy-to-clipboard: 3.3.3 @@ -1853,7 +1853,7 @@ packages: /babel-plugin-macros@2.8.0: resolution: {integrity: sha512-SEP5kJpfGYqYKpBrj5XU3ahw5p5GOHJ0U5ssOSQ/WBVdwkD2Dzlce95exQTs3jOVWPPKLBN2rlEWkCK7dSmLvg==} dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 cosmiconfig: 6.0.0 resolve: 1.22.8 dev: false @@ -2370,7 +2370,7 @@ packages: resolution: {integrity: sha512-fnULvOpxnC5/Vg3NCiWelDsLiUc9bRwAPs/+LfTLNvetFCtCTN+yQz15C/fs4AwX1R9K5GLtLfn8QW+dWisaAw==} engines: {node: '>=0.11'} dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 dev: false /dayjs@1.11.13: @@ -2547,7 +2547,7 @@ packages: /dom-helpers@5.2.1: resolution: {integrity: sha512-nRCa7CK3VTrM2NmGkIy4cbK7IZlgBE/PYMn55rrXefr5xXDP0LdtfPnblFDoVdcAfslJ7or6iqAUnx0CCGIWQA==} dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 csstype: 3.1.3 dev: false @@ -3514,7 +3514,7 @@ packages: /history@4.10.1: resolution: {integrity: sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==} dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 loose-envify: 1.4.0 resolve-pathname: 3.0.0 tiny-invariant: 1.3.3 @@ -5088,7 +5088,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 dom-align: 1.12.4 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5103,7 +5103,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 array-tree-filter: 2.1.0 rc-trigger: 5.3.4(react-dom@16.14.0)(react@16.14.0) rc-util: 
5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5118,7 +5118,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 react: 16.14.0 react-dom: 16.14.0(react@16.14.0) @@ -5130,7 +5130,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5145,7 +5145,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5159,7 +5159,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5172,7 +5172,7 @@ packages: react: '*' react-dom: '*' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-trigger: 5.3.4(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5186,7 +5186,7 @@ packages: react: '>= 16.9.0' react-dom: '>= 16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 async-validator: 3.5.2 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5199,7 +5199,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-dialog: 8.5.3(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5213,7 +5213,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5226,7 +5226,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-menu: 8.10.8(react-dom@16.14.0)(react@16.14.0) rc-textarea: 0.3.7(react-dom@16.14.0)(react@16.14.0) @@ -5242,7 +5242,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 mini-store: 3.0.6(react-dom@16.14.0)(react@16.14.0) rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) @@ -5260,7 +5260,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5274,7 +5274,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5288,7 +5288,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-resize-observer: 1.4.0(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5302,7 +5302,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 react: 16.14.0 react-dom: 16.14.0(react@16.14.0) @@ -5315,7 +5315,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 
date-fns: 2.30.0 dayjs: 1.11.13 @@ -5333,7 +5333,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 react: 16.14.0 react-dom: 16.14.0(react@16.14.0) @@ -5346,7 +5346,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5359,7 +5359,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5374,7 +5374,7 @@ packages: react: '*' react-dom: '*' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) rc-overflow: 1.3.2(react-dom@16.14.0)(react@16.14.0) @@ -5392,7 +5392,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-tooltip: 5.0.2(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5408,7 +5408,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5421,7 +5421,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5435,7 +5435,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-resize-observer: 1.4.0(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5451,7 +5451,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-dropdown: 3.2.5(react-dom@16.14.0)(react@16.14.0) rc-menu: 8.10.8(react-dom@16.14.0)(react@16.14.0) @@ -5467,7 +5467,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-resize-observer: 1.4.0(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5482,7 +5482,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 rc-trigger: 5.3.4(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 react-dom: 16.14.0(react@16.14.0) @@ -5494,7 +5494,7 @@ packages: react: '*' react-dom: '*' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-select: 12.1.13(react-dom@16.14.0)(react@16.14.0) rc-tree: 4.1.5(react-dom@16.14.0)(react@16.14.0) @@ -5510,7 +5510,7 @@ packages: react: '*' react-dom: '*' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5526,7 +5526,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-align: 4.0.15(react-dom@16.14.0)(react@16.14.0) rc-motion: 2.9.2(react-dom@16.14.0)(react@16.14.0) @@ -5541,7 +5541,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + 
'@babel/runtime': 7.25.6 classnames: 2.5.1 rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) react: 16.14.0 @@ -5554,7 +5554,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 react: 16.14.0 react-dom: 16.14.0(react@16.14.0) react-is: 18.3.1 @@ -5567,7 +5567,7 @@ packages: react: '>=16.9.0' react-dom: '>=16.9.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 classnames: 2.5.1 rc-resize-observer: 1.4.0(react-dom@16.14.0)(react@16.14.0) rc-util: 5.43.0(react-dom@16.14.0)(react@16.14.0) @@ -5620,7 +5620,7 @@ packages: peerDependencies: react: '>=15' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 history: 4.10.1 loose-envify: 1.4.0 prop-types: 15.8.1 @@ -5635,7 +5635,7 @@ packages: peerDependencies: react: '>=15' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 history: 4.10.1 hoist-non-react-statics: 3.3.2 loose-envify: 1.4.0 @@ -5653,7 +5653,7 @@ packages: react: ^16.8.0 || ^17.0.0 react-dom: ^16.8.0 || ^17.0.0 dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 '@emotion/cache': 10.0.29 '@emotion/core': 10.3.1(react@16.14.0) '@emotion/css': 10.0.27 @@ -5673,7 +5673,7 @@ packages: react: '>=16.6.0' react-dom: '>=16.6.0' dependencies: - '@babel/runtime': 7.25.4 + '@babel/runtime': 7.25.6 dom-helpers: 5.2.1 loose-envify: 1.4.0 prop-types: 15.8.1 diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/search/search.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/search/search.tsx index 21d4341787ed..8cac2a9c0477 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/search/search.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/search/search.tsx @@ -23,6 +23,7 @@ import { Option } from '@/v2/components/select/singleSelect'; // ------------- Types -------------- // type SearchProps = { + disabled?: boolean; searchColumn?: string; searchInput: string; searchOptions?: Option[]; @@ -39,6 +40,7 @@ type SearchProps = { // ------------- Component -------------- // const Search: React.FC = ({ + disabled = false, searchColumn, searchInput = '', searchOptions = [], @@ -48,6 +50,7 @@ const Search: React.FC = ({ const selectFilter = searchColumn ? ( { } // ------------- Component -------------- // + +const Option: React.FC> = (props) => { + return ( +
+ + null} /> + + +
+ ) +} + + const MultiSelect: React.FC = ({ options = [], selected = [], @@ -58,24 +80,20 @@ const MultiSelect: React.FC = ({ ...props }) => { - const Option: React.FC> = (props) => { + const ValueContainer = ({ children, ...props }: ValueContainerProps) => { return ( -
- - null} /> - - -
- ) - } + + {React.Children.map(children, (child) => ( + ((child as React.ReactElement> + | React.ReactPortal)?.type as React.JSXElementConstructor)).name === "DummyInput" + ? child + : null + )} + {placeholder}: {selected.length} selected + + ); + }; return ( = ({ classNamePrefix='multi-select' options={options} components={{ + ValueContainer, Option }} placeholder={placeholder} value={selected} + isOptionDisabled={(option) => option.value === fixedColumn} onChange={(selected: ValueType) => { if (selected?.length === options.length) return onChange!(options); return onChange!(selected); diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/select/singleSelect.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/select/singleSelect.tsx index 41ab03f5982c..1d02b407334b 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/select/singleSelect.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/select/singleSelect.tsx @@ -50,7 +50,7 @@ const SingleSelect: React.FC = ({ const ValueContainer = ({ children, ...props }: ValueContainerProps) => { - const selectedLimit = props.getValue() as Option[]; + const selectedValue = props.getValue() as Option[]; return ( {React.Children.map(children, (child) => ( @@ -60,7 +60,7 @@ const SingleSelect: React.FC = ({ ? child : null )} - Limit: {selectedLimit[0]?.label ?? ''} + {placeholder}: {selectedValue[0]?.label ?? ''} ); }; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.less new file mode 100644 index 000000000000..8f4c8ffaf9f2 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.less @@ -0,0 +1,41 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +.content-div { + min-height: unset; + + .table-header-section { + display: flex; + justify-content: space-between; + align-items: center; + + .table-filter-section { + font-size: 14px; + font-weight: normal; + display: flex; + column-gap: 8px; + padding: 16px 8px; + } + } + + .tag-block { + display: flex; + column-gap: 8px; + padding: 0px 8px 16px 8px; + } +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.tsx new file mode 100644 index 000000000000..bd8950e54c87 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/buckets/buckets.tsx @@ -0,0 +1,563 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import React, { useEffect, useState } from 'react'; +import moment from 'moment'; +import { Table, Tag } from 'antd'; +import { + ColumnProps, + ColumnsType, + TablePaginationConfig +} from 'antd/es/table'; +import { + CheckCircleOutlined, + CloseCircleOutlined, + CloudServerOutlined, + FileUnknownOutlined, + HddOutlined, + LaptopOutlined, + SaveOutlined +} from '@ant-design/icons'; +import { ValueType } from 'react-select'; +import { useLocation } from 'react-router-dom'; + +import QuotaBar from '@/components/quotaBar/quotaBar'; +import AutoReloadPanel from '@/components/autoReloadPanel/autoReloadPanel'; +import AclPanel from '@/v2/components/aclDrawer/aclDrawer'; +import Search from '@/v2/components/search/search'; +import MultiSelect from '@/v2/components/select/multiSelect'; +import SingleSelect, { Option } from '@/v2/components/select/singleSelect'; + +import { AutoReloadHelper } from '@/utils/autoReloadHelper'; +import { AxiosGetHelper } from "@/utils/axiosRequestHelper"; +import { nullAwareLocaleCompare, showDataFetchError } from '@/utils/common'; +import { useDebounce } from '@/v2/hooks/debounce.hook'; + +import { + Bucket, + BucketLayout, + BucketLayoutTypeList, + BucketResponse, + BucketsState, + BucketStorage, + BucketStorageTypeList +} from '@/v2/types/bucket.types'; + +import './buckets.less'; + + +const LIMIT_OPTIONS: Option[] = [ + { + label: '1000', + value: '1000' + }, + { + label: '5000', + value: '5000' + }, + { + label: '10000', + value: '10000' + }, + { + label: '20000', + value: '20000' + } +] + +const renderIsVersionEnabled = (isVersionEnabled: boolean) => { + return isVersionEnabled + ? + : +}; + +const renderStorageType = (bucketStorage: BucketStorage) => { + const bucketStorageIconMap: Record = { + RAM_DISK: , + SSD: , + DISK: , + ARCHIVE: + }; + const icon = bucketStorage in bucketStorageIconMap + ? 
bucketStorageIconMap[bucketStorage] + : ; + return {icon} {bucketStorage}; +}; + +const renderBucketLayout = (bucketLayout: BucketLayout) => { + const bucketLayoutColorMap = { + FILE_SYSTEM_OPTIMIZED: 'green', + OBJECT_STORE: 'orange', + LEGACY: 'blue' + }; + const color = bucketLayout in bucketLayoutColorMap ? + bucketLayoutColorMap[bucketLayout] : ''; + return {bucketLayout}; +}; + +const SearchableColumnOpts = [{ + label: 'Bucket', + value: 'name' +}, { + label: 'Volume', + value: 'volumeName' +}] + +const COLUMNS: ColumnsType = [ + { + title: 'Bucket', + dataIndex: 'name', + key: 'name', + sorter: (a: Bucket, b: Bucket) => a.name.localeCompare(b.name), + defaultSortOrder: 'ascend' as const + }, + { + title: 'Volume', + dataIndex: 'volumeName', + key: 'volumeName', + sorter: (a: Bucket, b: Bucket) => a.volumeName.localeCompare(b.volumeName), + defaultSortOrder: 'ascend' as const + }, + { + title: 'Owner', + dataIndex: 'owner', + key: 'owner', + sorter: (a: Bucket, b: Bucket) => nullAwareLocaleCompare(a.owner, b.owner) + }, + { + title: 'Versioning', + dataIndex: 'versioning', + key: 'isVersionEnabled', + render: (isVersionEnabled: boolean) => renderIsVersionEnabled(isVersionEnabled) + }, + { + title: 'Storage Type', + dataIndex: 'storageType', + key: 'storageType', + filterMultiple: true, + filters: BucketStorageTypeList.map(state => ({ text: state, value: state })), + onFilter: (value, record: Bucket) => record.storageType === value, + sorter: (a: Bucket, b: Bucket) => a.storageType.localeCompare(b.storageType), + render: (storageType: BucketStorage) => renderStorageType(storageType) + }, + { + title: 'Bucket Layout', + dataIndex: 'bucketLayout', + key: 'bucketLayout', + filterMultiple: true, + filters: BucketLayoutTypeList.map(state => ({ text: state, value: state })), + onFilter: (value, record: Bucket) => record.bucketLayout === value, + sorter: (a: Bucket, b: Bucket) => a.bucketLayout.localeCompare(b.bucketLayout), + render: (bucketLayout: BucketLayout) => renderBucketLayout(bucketLayout) + }, + { + title: 'Creation Time', + dataIndex: 'creationTime', + key: 'creationTime', + sorter: (a: Bucket, b: Bucket) => a.creationTime - b.creationTime, + render: (creationTime: number) => { + return creationTime > 0 ? moment(creationTime).format('ll LTS') : 'NA'; + } + }, + { + title: 'Modification Time', + dataIndex: 'modificationTime', + key: 'modificationTime', + sorter: (a: Bucket, b: Bucket) => a.modificationTime - b.modificationTime, + render: (modificationTime: number) => { + return modificationTime > 0 ? moment(modificationTime).format('ll LTS') : 'NA'; + } + }, + { + title: 'Storage Capacity', + key: 'quotaCapacityBytes', + sorter: (a: Bucket, b: Bucket) => a.usedBytes - b.usedBytes, + render: (text: string, record: Bucket) => ( + + ) + }, + { + title: 'Namespace Capacity', + key: 'namespaceCapacity', + sorter: (a: Bucket, b: Bucket) => a.usedNamespace - b.usedNamespace, + render: (text: string, record: Bucket) => ( + + ) + }, + { + title: 'Source Volume', + dataIndex: 'sourceVolume', + key: 'sourceVolume', + render: (sourceVolume: string) => { + return sourceVolume ? sourceVolume : 'NA'; + } + }, + { + title: 'Source Bucket', + dataIndex: 'sourceBucket', + key: 'sourceBucket', + render: (sourceBucket: string) => { + return sourceBucket ? 
sourceBucket : 'NA'; + } + } +]; + +const defaultColumns = COLUMNS.map(column => ({ + label: column.title as string, + value: column.key as string +})); + +function getVolumeBucketMap(data: Bucket[]) { + const volumeBucketMap = data.reduce(( + map: Map>, + currentBucket + ) => { + const volume = currentBucket.volumeName; + if (map.has(volume)) { + const buckets = Array.from(map.get(volume)!); + map.set(volume, new Set([...buckets, currentBucket])); + } else { + map.set(volume, new Set().add(currentBucket)); + } + return map; + }, new Map>()); + return volumeBucketMap; +} + +function getFilteredBuckets( + selectedVolumes: Option[], + bucketsMap: Map> +) { + let selectedBuckets: Bucket[] = []; + selectedVolumes.forEach(selectedVolume => { + if (bucketsMap.has(selectedVolume.value) + && bucketsMap.get(selectedVolume.value)) { + selectedBuckets = [ + ...selectedBuckets, + ...Array.from(bucketsMap.get(selectedVolume.value)!) + ]; + } + }); + + return selectedBuckets; +} + +const Buckets: React.FC<{}> = () => { + + let cancelSignal: AbortController; + + const [state, setState] = useState({ + totalCount: 0, + lastUpdated: 0, + columnOptions: defaultColumns, + volumeBucketMap: new Map>(), + bucketsUnderVolume: [], + volumeOptions: [], + }); + const [loading, setLoading] = useState(false); + const [selectedColumns, setSelectedColumns] = useState(defaultColumns); + const [selectedVolumes, setSelectedVolumes] = useState([]); + const [selectedLimit, setSelectedLimit] = useState