- List<RepeatedOmKeyInfo> deletedKeys = impl.listTrash(
- request.getVolumeName(),
- request.getBucketName(),
- request.getStartKeyName(),
- request.getKeyPrefix(),
- request.getMaxKeys());
-
- for (RepeatedOmKeyInfo key: deletedKeys) {
- resp.addDeletedKeys(key.getProto(false, clientVersion));
- }
-
- return resp.build();
- }
-
@RequestFeatureValidator(
conditions = ValidationCondition.OLDER_CLIENT_REQUESTS,
processingPhase = RequestProcessingPhase.POST_PROCESS,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java
deleted file mode 100644
index 4f0c15f15e53..000000000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om;
-
-
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.server.ServerUtils;
-import org.apache.hadoop.hdds.utils.db.DBConfigFromFile;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
-import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.ratis.util.ExitUtils;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.io.TempDir;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Path;
-import java.util.ArrayList;
-import java.util.Collections;
-
-/**
- * Test Key Trash Service.
- *
- * This test does the things including:
- * 1. UTs for list trash.
- * 2. UTs for recover trash.
- * 3. UTs for empty trash.
- *
- */
-public class TestTrashService {
-
- @TempDir
- private Path tempFolder;
-
- private KeyManager keyManager;
- private OzoneManagerProtocol writeClient;
- private OzoneManager om;
- private String volumeName;
- private String bucketName;
-
- @BeforeEach
- void setup() throws Exception {
- ExitUtils.disableSystemExit();
- OzoneConfiguration configuration = new OzoneConfiguration();
-
- File folder = tempFolder.toFile();
- if (!folder.exists()) {
- assertTrue(folder.mkdirs());
- }
- System.setProperty(DBConfigFromFile.CONFIG_DIR, "/");
- ServerUtils.setOzoneMetaDirPath(configuration, folder.toString());
-
- OmTestManagers omTestManagers
- = new OmTestManagers(configuration);
- keyManager = omTestManagers.getKeyManager();
- writeClient = omTestManagers.getWriteClient();
- om = omTestManagers.getOzoneManager();
- volumeName = "volume";
- bucketName = "bucket";
- }
-
- @AfterEach
- public void cleanup() throws Exception {
- om.stop();
- }
-
- @Test
- public void testRecoverTrash() throws IOException {
- String keyName = "testKey";
- String destinationBucket = "destBucket";
- createAndDeleteKey(keyName);
-
- boolean recoverOperation = keyManager.getMetadataManager()
- .recoverTrash(volumeName, bucketName, keyName, destinationBucket);
- assertTrue(recoverOperation);
- }
-
- private void createAndDeleteKey(String keyName) throws IOException {
-
- OMRequestTestUtils.addVolumeToOM(keyManager.getMetadataManager(),
- OmVolumeArgs.newBuilder()
- .setOwnerName("owner")
- .setAdminName("admin")
- .setVolume(volumeName)
- .build());
-
- OMRequestTestUtils.addBucketToOM(keyManager.getMetadataManager(),
- OmBucketInfo.newBuilder()
- .setVolumeName(volumeName)
- .setBucketName(bucketName)
- .build());
-
- OmKeyArgs keyArgs = new OmKeyArgs.Builder()
- .setVolumeName(volumeName)
- .setBucketName(bucketName)
- .setKeyName(keyName)
- .setAcls(Collections.emptyList())
- .setLocationInfoList(new ArrayList<>())
- .setReplicationConfig(StandaloneReplicationConfig
- .getInstance(HddsProtos.ReplicationFactor.ONE))
- .setOwnerName(UserGroupInformation.getCurrentUser().getShortUserName())
- .build();
-
- /* Create and delete key in the Key Manager. */
- OpenKeySession session = writeClient.openKey(keyArgs);
- writeClient.commitKey(keyArgs, session.getId());
- writeClient.deleteKey(keyArgs);
- }
-
-}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
index 41876c6e2454..e3e3537b1c3b 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
@@ -43,7 +43,6 @@
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatusLight;
-import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
import org.apache.hadoop.ozone.om.helpers.S3VolumeContext;
import org.apache.hadoop.ozone.om.helpers.TenantStateList;
@@ -301,21 +300,6 @@ public List<OzoneKey> listKeys(String volumeName, String bucketName,
return null;
}
- @Override
- public List<RepeatedOmKeyInfo> listTrash(String volumeName, String bucketName,
- String startKeyName,
- String keyPrefix, int maxKeys)
- throws IOException {
- return null;
- }
-
- @Override
- public boolean recoverTrash(String volumeName, String bucketName,
- String keyName, String destinationBucket)
- throws IOException {
- return false;
- }
-
@Override
public OzoneKeyDetails getKeyDetails(String volumeName, String bucketName,
String keyName) throws IOException {
From 2e30dc182c9c3e50b0c023f5a4915fa6a88e5eb8 Mon Sep 17 00:00:00 2001
From: Tejaskriya <87555809+Tejaskriya@users.noreply.github.com>
Date: Tue, 27 Aug 2024 15:27:39 +0530
Subject: [PATCH 009/106] HDDS-11190. Add --fields option to ldb scan command
(#6976)
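
A usage sketch (the paths, column-family name, and exact flag spellings here
are illustrative, following the --fields option definition added below):

  ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable \
      --fields="keyName,acls.type"

Instead of the full JSON for each record, only the requested fields are
printed, with "acls.type" selecting the type subfield of each entry under
acls.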
---
.../apache/hadoop/ozone/debug/DBScanner.java | 122 +++++++++++++++++-
.../hadoop/ozone/debug/ValueSchema.java | 17 +--
2 files changed, 126 insertions(+), 13 deletions(-)
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java
index 0c38fbe33ba1..4653aa3eeb31 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java
@@ -55,9 +55,11 @@
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.PrintWriter;
+import java.lang.reflect.Field;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -121,6 +123,11 @@ public class DBScanner implements Callable<Void>, SubcommandWithParent {
description = "Key at which iteration of the DB ends")
private String endKey;
+ @CommandLine.Option(names = {"--fields"},
+ description = "Comma-separated list of fields needed for each value. " +
+ "eg.) \"name,acls.type\" for showing name and type under acls.")
+ private String fieldsFilter;
+
@CommandLine.Option(names = {"--dnSchema", "--dn-schema", "-d"},
description = "Datanode DB Schema Version: V1/V2/V3",
defaultValue = "V3")
@@ -291,7 +298,7 @@ private void processRecords(ManagedRocksIterator iterator,
}
Future<Void> future = threadPool.submit(
new Task(dbColumnFamilyDef, batch, logWriter, sequenceId,
- withKey, schemaV3));
+ withKey, schemaV3, fieldsFilter));
futures.add(future);
batch = new ArrayList<>(batchSize);
sequenceId++;
@@ -299,7 +306,7 @@ private void processRecords(ManagedRocksIterator iterator,
}
if (!batch.isEmpty()) {
Future<Void> future = threadPool.submit(new Task(dbColumnFamilyDef,
- batch, logWriter, sequenceId, withKey, schemaV3));
+ batch, logWriter, sequenceId, withKey, schemaV3, fieldsFilter));
futures.add(future);
}
@@ -465,22 +472,51 @@ private static class Task implements Callable<Void> {
private final long sequenceId;
private final boolean withKey;
private final boolean schemaV3;
+ private String valueFields;
Task(DBColumnFamilyDefinition dbColumnFamilyDefinition,
ArrayList<ByteArrayKeyValue> batch, LogWriter logWriter,
- long sequenceId, boolean withKey, boolean schemaV3) {
+ long sequenceId, boolean withKey, boolean schemaV3, String valueFields) {
this.dbColumnFamilyDefinition = dbColumnFamilyDefinition;
this.batch = batch;
this.logWriter = logWriter;
this.sequenceId = sequenceId;
this.withKey = withKey;
this.schemaV3 = schemaV3;
+ this.valueFields = valueFields;
+ }
+
+ Map<String, Object> getFieldSplit(List<String> fields, Map<String, Object> fieldMap) {
+ int len = fields.size();
+ if (fieldMap == null) {
+ fieldMap = new HashMap<>();
+ }
+ if (len == 1) {
+ fieldMap.putIfAbsent(fields.get(0), null);
+ } else {
+ Map<String, Object> fieldMapGet = (Map<String, Object>) fieldMap.get(fields.get(0));
+ if (fieldMapGet == null) {
+ fieldMap.put(fields.get(0), getFieldSplit(fields.subList(1, len), null));
+ } else {
+ fieldMap.put(fields.get(0), getFieldSplit(fields.subList(1, len), fieldMapGet));
+ }
+ }
+ return fieldMap;
}
@Override
public Void call() {
try {
ArrayList<String> results = new ArrayList<>(batch.size());
+ Map<String, Object> fieldsSplitMap = new HashMap<>();
+
+ if (valueFields != null) {
+ for (String field : valueFields.split(",")) {
+ String[] subfields = field.split("\\.");
+ fieldsSplitMap = getFieldSplit(Arrays.asList(subfields), fieldsSplitMap);
+ }
+ }
+
for (ByteArrayKeyValue byteArrayKeyValue : batch) {
StringBuilder sb = new StringBuilder();
if (!(sequenceId == FIRST_SEQUENCE_ID && results.isEmpty())) {
@@ -515,16 +551,92 @@ public Void call() {
Object o = dbColumnFamilyDefinition.getValueCodec()
.fromPersistedFormat(byteArrayKeyValue.getValue());
- sb.append(WRITER.writeValueAsString(o));
+
+ if (valueFields != null) {
+ Map<String, Object> filteredValue = new HashMap<>();
+ filteredValue.putAll(getFilteredObject(o, dbColumnFamilyDefinition.getValueType(), fieldsSplitMap));
+ sb.append(WRITER.writeValueAsString(filteredValue));
+ } else {
+ sb.append(WRITER.writeValueAsString(o));
+ }
+
results.add(sb.toString());
}
logWriter.log(results, sequenceId);
- } catch (Exception e) {
+ } catch (IOException e) {
exception = true;
LOG.error("Exception parse Object", e);
}
return null;
}
+
+ Map<String, Object> getFilteredObject(Object obj, Class<?> clazz, Map<String, Object> fieldsSplitMap) {
+ Map<String, Object> valueMap = new HashMap<>();
+ for (Map.Entry<String, Object> field : fieldsSplitMap.entrySet()) {
+ try {
+ Field valueClassField = getRequiredFieldFromAllFields(clazz, field.getKey());
+ Object valueObject = valueClassField.get(obj);
+ Map<String, Object> subfields = (Map<String, Object>) field.getValue();
+
+ if (subfields == null) {
+ valueMap.put(field.getKey(), valueObject);
+ } else {
+ if (Collection.class.isAssignableFrom(valueObject.getClass())) {
+ List
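
For intuition, a minimal standalone sketch of the field-splitting step above
(the class and method names are invented for illustration and are not part of
the patch): a spec such as "name,acls.type" is parsed into a nested map, one
dotted path at a time, which getFilteredObject then walks via reflection.

  import java.util.Arrays;
  import java.util.HashMap;
  import java.util.List;
  import java.util.Map;

  public final class FieldSplitSketch {

    // Build a nested map from one dotted field path, mirroring getFieldSplit:
    // a leaf segment maps to null, an intermediate segment to its own sub-map.
    @SuppressWarnings("unchecked")
    static Map<String, Object> split(List<String> fields, Map<String, Object> map) {
      if (map == null) {
        map = new HashMap<>();
      }
      if (fields.size() == 1) {
        map.putIfAbsent(fields.get(0), null);
      } else {
        Map<String, Object> child = (Map<String, Object>) map.get(fields.get(0));
        map.put(fields.get(0), split(fields.subList(1, fields.size()), child));
      }
      return map;
    }

    public static void main(String[] args) {
      Map<String, Object> spec = new HashMap<>();
      for (String field : "name,acls.type".split(",")) {
        spec = split(Arrays.asList(field.split("\\.")), spec);
      }
      // Prints {name=null, acls={type=null}} (HashMap iteration order may vary).
      System.out.println(spec);
    }
  }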