diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java index 982b559c7a5..2d0b2bb56fd 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java @@ -47,6 +47,9 @@ public enum OzoneManagerVersion implements ComponentVersion { LIGHTWEIGHT_LIST_STATUS(8, "OzoneManager version that supports lightweight" + " listStatus API."), + S3_OBJECT_TAGGING_API(9, "OzoneManager version that supports S3 object tagging APIs, such as " + + "PutObjectTagging, GetObjectTagging, and DeleteObjectTagging"), + FUTURE_VERSION(-1, "Used internally in the client when the server side is " + " newer and an unknown server version has arrived to the client."); diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index 216b51b8e86..405f3a42e9e 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -1046,6 +1046,37 @@ public void setTimes(String keyName, long mtime, long atime) proxy.setTimes(ozoneObj, keyName, mtime, atime); } + /** + * Gets the tags for an existing key. + * @param keyName Key name. + * @return Tags for the specified key. + * @throws IOException + */ + public Map getObjectTagging(String keyName) + throws IOException { + return proxy.getObjectTagging(volumeName, name, keyName); + } + + /** + * Sets the tags to an existing key. + * @param keyName Key name. + * @param tags Tags to set on the key. 
+ * @throws IOException + */ + public void putObjectTagging(String keyName, Map tags) + throws IOException { + proxy.putObjectTagging(volumeName, name, keyName, tags); + } + + /** + * Removes all the tags from an existing key. + * @param keyName Key name + * @throws IOException + */ + public void deleteObjectTagging(String keyName) throws IOException { + proxy.deleteObjectTagging(volumeName, name, keyName); + } + public void setSourcePathExist(boolean b) { this.sourcePathExist = b; } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index 8d9614b554a..c0bffaf8950 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -1311,4 +1311,38 @@ void setTimes(OzoneObj obj, String keyName, long mtime, long atime) * @throws IOException */ void recoverKey(OmKeyArgs args, long clientID) throws IOException; + + /** + * Gets the tags for an existing key. + * @param volumeName Volume name. + * @param bucketName Bucket name. + * @param keyName Key name. + * @return tags for the specified key. + * @throws IOException + */ + Map getObjectTagging(String volumeName, String bucketName, String keyName) + throws IOException; + + /** + * Sets the tags to an existing key. + * @param volumeName Volume name. + * @param bucketName Bucket name. + * @param keyName Key name. + * @param tags Tags to set on the key. + * @throws IOException + */ + void putObjectTagging(String volumeName, String bucketName, String keyName, + Map tags) throws IOException; + + + /** + * Removes all the tags from the specified key. + * @param volumeName Volume name. + * @param bucketName Bucket name. + * @param keyName Key name. 
+ * @throws IOException + */ + void deleteObjectTagging(String volumeName, String bucketName, String keyName) + throws IOException; + } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index fe986640176..73692b37c59 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -2727,6 +2727,61 @@ public void recoverKey(OmKeyArgs args, long clientID) throws IOException { ozoneManagerClient.recoverKey(args, clientID); } + @Override + public Map getObjectTagging(String volumeName, String bucketName, String keyName) + throws IOException { + if (omVersion.compareTo(OzoneManagerVersion.S3_OBJECT_TAGGING_API) < 0) { + throw new IOException("OzoneManager does not support S3 object tagging API"); + } + + verifyVolumeName(volumeName); + verifyBucketName(bucketName); + Preconditions.checkNotNull(keyName); + OmKeyArgs keyArgs = new OmKeyArgs.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .build(); + return ozoneManagerClient.getObjectTagging(keyArgs); + } + + @Override + public void putObjectTagging(String volumeName, String bucketName, + String keyName, Map tags) throws IOException { + if (omVersion.compareTo(OzoneManagerVersion.S3_OBJECT_TAGGING_API) < 0) { + throw new IOException("OzoneManager does not support S3 object tagging API"); + } + + verifyVolumeName(volumeName); + verifyBucketName(bucketName); + Preconditions.checkNotNull(keyName); + OmKeyArgs keyArgs = new OmKeyArgs.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .addAllTags(tags) + .build(); + ozoneManagerClient.putObjectTagging(keyArgs); + } + + @Override + public void deleteObjectTagging(String volumeName, String bucketName, + String keyName) throws IOException { 
+ if (omVersion.compareTo(OzoneManagerVersion.S3_OBJECT_TAGGING_API) < 0) { + throw new IOException("OzoneManager does not support S3 object tagging API"); + } + + verifyVolumeName(volumeName); + verifyBucketName(bucketName); + Preconditions.checkNotNull(keyName); + OmKeyArgs keyArgs = new OmKeyArgs.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .build(); + ozoneManagerClient.deleteObjectTagging(keyArgs); + } + private static ExecutorService createThreadPoolExecutor( int corePoolSize, int maximumPoolSize, String threadNameFormat) { return new ThreadPoolExecutor(corePoolSize, maximumPoolSize, diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 9565d2db8ef..8d24f2de155 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -278,6 +278,7 @@ public static boolean isReadOnly( case SetSafeMode: case PrintCompactionLogDag: case GetSnapshotInfo: + case GetObjectTagging: case GetQuotaRepairStatus: case StartQuotaRepair: return true; @@ -339,6 +340,8 @@ public static boolean isReadOnly( case AbortExpiredMultiPartUploads: case SetSnapshotProperty: case QuotaRepair: + case PutObjectTagging: + case DeleteObjectTagging: case UnknownCommand: return false; case EchoRPC: diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/IOmMetadataReader.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/IOmMetadataReader.java index 7d4e769365f..99e2759117e 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/IOmMetadataReader.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/IOmMetadataReader.java @@ -30,6 +30,7 @@ import java.io.IOException; import java.util.List; +import java.util.Map; /** * Protocol for OmMetadataReader's. 
@@ -165,4 +166,11 @@ ListKeysLightResult listKeysLight(String volumeName, String bucketName, * @throws IOException if there is error. */ List getAcl(OzoneObj obj) throws IOException; + + /** + * Gets the tags for the specified key. + * @param args Key args + * @return Tags associated with the key. + */ + Map getObjectTagging(OmKeyArgs args) throws IOException; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyValueUtil.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyValueUtil.java index db07a19b211..0b9b4b38a51 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyValueUtil.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyValueUtil.java @@ -34,8 +34,8 @@ private KeyValueUtil() { /** * Parse Key,Value map data from protobuf representation. */ - public static Map getFromProtobuf(List metadata) { - return metadata.stream() + public static Map getFromProtobuf(List keyValueList) { + return keyValueList.stream() .collect(Collectors.toMap(KeyValue::getKey, KeyValue::getValue)); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java index 94822630f8e..7f633d7ea73 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java @@ -1145,6 +1145,31 @@ void setTimes(OmKeyArgs keyArgs, long mtime, long atime) boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException; + /** + * Gets the tags for the specified key. + * @param args Key args + * @return Tags associated with the key. + */ + Map getObjectTagging(OmKeyArgs args) throws IOException; + + /** + * Sets the tags to an existing key. 
+ * @param args Key args + */ + default void putObjectTagging(OmKeyArgs args) throws IOException { + throw new UnsupportedOperationException("OzoneManager does not require " + + "this to be implemented, as write requests use a new approach."); + } + + /** + * Removes all the tags from the specified key. + * @param args Key args + */ + default void deleteObjectTagging(OmKeyArgs args) throws IOException { + throw new UnsupportedOperationException("OzoneManager does not require " + + "this to be implemented, as write requests use a new approach."); + } + /** * Get status of last triggered quota repair in OM. * @return String diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index b140cf95e69..a748efac722 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -108,6 +108,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteObjectTaggingRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteSnapshotRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteTenantRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteTenantResponse; @@ -125,6 +126,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusResponse; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetKeyInfoRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetKeyInfoResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetObjectTaggingRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetObjectTaggingResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3SecretRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3SecretResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3VolumeContextRequest; @@ -174,6 +177,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrintCompactionLogDagRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RangerBGSyncRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RangerBGSyncResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RecoverLeaseRequest; @@ -2580,6 +2584,72 @@ public void startQuotaRepair(List buckets) throws IOException { handleError(submitRequest(omRequest)); } + @Override + public Map getObjectTagging(OmKeyArgs args) throws IOException { + KeyArgs keyArgs = KeyArgs.newBuilder() + .setVolumeName(args.getVolumeName()) + .setBucketName(args.getBucketName()) + .setKeyName(args.getKeyName()) + .build(); + + GetObjectTaggingRequest req = + GetObjectTaggingRequest.newBuilder() + .setKeyArgs(keyArgs) + .build(); + + OMRequest omRequest = createOMRequest(Type.GetObjectTagging) + .setGetObjectTaggingRequest(req) + .build(); + + GetObjectTaggingResponse resp = + 
handleError(submitRequest(omRequest)).getGetObjectTaggingResponse(); + + return KeyValueUtil.getFromProtobuf(resp.getTagsList()); + } + + @Override + public void putObjectTagging(OmKeyArgs args) throws IOException { + KeyArgs keyArgs = KeyArgs.newBuilder() + .setVolumeName(args.getVolumeName()) + .setBucketName(args.getBucketName()) + .setKeyName(args.getKeyName()) + .addAllTags(KeyValueUtil.toProtobuf(args.getTags())) + .build(); + + PutObjectTaggingRequest req = + PutObjectTaggingRequest.newBuilder() + .setKeyArgs(keyArgs) + .build(); + + OMRequest omRequest = createOMRequest(Type.PutObjectTagging) + .setPutObjectTaggingRequest(req) + .build(); + + OMResponse omResponse = submitRequest(omRequest); + handleError(omResponse); + } + + @Override + public void deleteObjectTagging(OmKeyArgs args) throws IOException { + KeyArgs keyArgs = KeyArgs.newBuilder() + .setVolumeName(args.getVolumeName()) + .setBucketName(args.getBucketName()) + .setKeyName(args.getKeyName()) + .build(); + + DeleteObjectTaggingRequest req = + DeleteObjectTaggingRequest.newBuilder() + .setKeyArgs(keyArgs) + .build(); + + OMRequest omRequest = createOMRequest(Type.DeleteObjectTagging) + .setDeleteObjectTaggingRequest(req) + .build(); + + OMResponse omResponse = submitRequest(omRequest); + handleError(omResponse); + } + private SafeMode toProtoBuf(SafeModeAction action) { switch (action) { case ENTER: diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot index 12fb985348a..82a985f1d50 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot @@ -44,6 +44,7 @@ Put object to s3 Get object from s3 ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 /tmp/testfile.result Compare files /tmp/testfile /tmp/testfile.result + Should not contain ${result} TagCount ${result} = Execute AWSS3ApiCli get-object 
--bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/zerobyte /tmp/zerobyte.result Compare files /tmp/zerobyte /tmp/zerobyte.result diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objecttagging.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objecttagging.robot new file mode 100644 index 00000000000..9098673680d --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/s3/objecttagging.robot @@ -0,0 +1,73 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License + +*** Settings *** +Documentation S3 gateway test with aws cli +Library OperatingSystem +Library String +Resource ../commonlib.robot +Resource commonawslib.robot +Test Timeout 5 minutes +Suite Setup Setup s3 tests + +*** Variables *** +${ENDPOINT_URL} http://s3g:9878 +${OZONE_TEST} true +${BUCKET} generated + + +*** Test Cases *** + +Put object tagging +# Create an object and call put-object-tagging + Execute echo "Randomtext" > /tmp/testfile + ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 --body /tmp/testfile + ${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix ${PREFIX}/putobject/key=value/ + Should contain ${result} f1 + + ${result} = Execute AWSS3ApiCli put-object-tagging --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 --tagging '{"TagSet": [{ "Key": "tag-key1", "Value": "tag-value1" }]}' + ${result} = Execute AWSS3APICli get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 /tmp/testfile2.result + Should contain ${result} TagCount + ${tagCount} = Execute and checkrc echo '${result}' | jq -r '.TagCount' 0 + Should Be Equal ${tagCount} 1 + +# Calling put-object-tagging again to overwrite the existing tags + ${result} = Execute AWSS3ApiCli put-object-tagging --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 --tagging '{"TagSet": [{ "Key": "tag-key2", "Value": "tag-value2" },{ "Key": "tag-key3", "Value": "tag-value3" }]}' + ${result} = Execute AWSS3APICli get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 /tmp/testfile2.result + Should contain ${result} TagCount + ${tagCount} = Execute and checkrc echo '${result}' | jq -r '.TagCount' 0 + Should Be Equal ${tagCount} 2 + +# Calling put-object-tagging on non-existent key + ${result} = Execute AWSS3APICli and checkrc put-object-tagging --bucket ${BUCKET} --key ${PREFIX}/nonexistent 
--tagging '{"TagSet": [{ "Key": "tag-key1", "Value": "tag-value1" }]}' 255 + Should contain ${result} NoSuchKey + +#This test depends on the previous test case. Can't be executed alone +Get object tagging + + ${result} = Execute AWSS3ApiCli get-object-tagging --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 + Should contain ${result} TagSet + ${tagCount} = Execute and checkrc echo '${result}' | jq '.TagSet | length' 0 + Should Be Equal ${tagCount} 2 + + +#This test depends on the previous test case. Can't be executed alone +Delete object tagging + + ${result} = Execute AWSS3ApiCli delete-object-tagging --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 + ${result} = Execute AWSS3ApiCli get-object-tagging --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 + Should contain ${result} TagSet + ${tagCount} = Execute and checkrc echo '${result}' | jq '.TagSet | length' 0 + Should Be Equal ${tagCount} 0 diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh b/hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh index b9a4c68587d..ab2807167d0 100755 --- a/hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh +++ b/hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh @@ -83,5 +83,6 @@ run_robot_test objectcopy run_robot_test objectmultidelete run_robot_test objecthead run_robot_test MultipartUpload +run_robot_test objecttagging rebot --outputdir results/ results/*.xml diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java index 8a55dc7b7d0..6d9bd23e2ee 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java @@ -139,7 +139,7 @@ private CompleteMultipartUploadRequest.Part uploadPart(String
uploadID, ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); Response response = REST.put(BUCKET, KEY, content.length(), - partNumber, uploadID, body); + partNumber, uploadID, null, body); assertEquals(200, response.getStatus()); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -168,7 +168,7 @@ private void completeMultipartUpload( private void getObjectMultipart(int partNumber, long bytes) throws IOException, OS3Exception { Response response = - REST.get(BUCKET, KEY, partNumber, null, 100, null); + REST.get(BUCKET, KEY, partNumber, null, 100, null, null); assertEquals(200, response.getStatus()); assertEquals(bytes, response.getLength()); assertEquals("3", response.getHeaderString(MP_PARTS_COUNT)); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java index 9ed0b182d33..69d516b9f51 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java @@ -4988,4 +4988,136 @@ public void reset() throws IOException { init(); } } + + @ParameterizedTest + @MethodSource("bucketLayouts") + public void testPutObjectTagging(BucketLayout bucketLayout) throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + + String value = "sample value"; + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + BucketArgs bucketArgs = + BucketArgs.newBuilder().setBucketLayout(bucketLayout).build(); + volume.createBucket(bucketName, bucketArgs); + OzoneBucket bucket = volume.getBucket(bucketName); + + String keyName = UUID.randomUUID().toString(); + + OzoneOutputStream out = bucket.createKey(keyName, + value.getBytes(UTF_8).length, 
anyReplication(), new HashMap<>()); + out.write(value.getBytes(UTF_8)); + out.close(); + + OzoneKey key = bucket.getKey(keyName); + assertTrue(key.getTags().isEmpty()); + + Map tags = new HashMap<>(); + tags.put("tag-key-1", "tag-value-1"); + tags.put("tag-key-2", "tag-value-2"); + + bucket.putObjectTagging(keyName, tags); + + OzoneKey updatedKey = bucket.getKey(keyName); + assertEquals(tags.size(), updatedKey.getTags().size()); + assertEquals(key.getModificationTime(), updatedKey.getModificationTime()); + assertThat(updatedKey.getTags()).containsAllEntriesOf(tags); + + // Do another putObjectTagging, it should override the previous one + Map secondTags = new HashMap<>(); + secondTags.put("tag-key-3", "tag-value-3"); + + bucket.putObjectTagging(keyName, secondTags); + + updatedKey = bucket.getKey(keyName); + assertEquals(secondTags.size(), updatedKey.getTags().size()); + assertThat(updatedKey.getTags()).containsAllEntriesOf(secondTags); + assertThat(updatedKey.getTags()).doesNotContainKeys("tag-key-1", "tag-key-2"); + + if (bucketLayout.equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) { + String dirKey = "dir1/"; + bucket.createDirectory(dirKey); + OMException exception = assertThrows(OMException.class, + () -> bucket.putObjectTagging(dirKey, tags)); + assertThat(exception.getResult()).isEqualTo(ResultCodes.NOT_SUPPORTED_OPERATION); + } + } + + @ParameterizedTest + @MethodSource("bucketLayouts") + public void testDeleteObjectTagging(BucketLayout bucketLayout) throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + + String value = "sample value"; + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + BucketArgs bucketArgs = + BucketArgs.newBuilder().setBucketLayout(bucketLayout).build(); + volume.createBucket(bucketName, bucketArgs); + OzoneBucket bucket = volume.getBucket(bucketName); + + String keyName = UUID.randomUUID().toString(); + + Map tags = new HashMap<>(); + 
tags.put("tag-key-1", "tag-value-1"); + tags.put("tag-key-2", "tag-value-2"); + + OzoneOutputStream out = bucket.createKey(keyName, + value.getBytes(UTF_8).length, anyReplication(), new HashMap<>(), tags); + out.write(value.getBytes(UTF_8)); + out.close(); + + OzoneKey key = bucket.getKey(keyName); + assertFalse(key.getTags().isEmpty()); + + bucket.deleteObjectTagging(keyName); + + OzoneKey updatedKey = bucket.getKey(keyName); + assertEquals(0, updatedKey.getTags().size()); + assertEquals(key.getModificationTime(), updatedKey.getModificationTime()); + + if (bucketLayout.equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) { + String dirKey = "dir1/"; + bucket.createDirectory(dirKey); + OMException exception = assertThrows(OMException.class, + () -> bucket.deleteObjectTagging(dirKey)); + assertThat(exception.getResult()).isEqualTo(ResultCodes.NOT_SUPPORTED_OPERATION); + } + } + + @ParameterizedTest + @MethodSource("bucketLayouts") + public void testGetObjectTagging(BucketLayout bucketLayout) throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + + String value = "sample value"; + store.createVolume(volumeName); + OzoneVolume volume = store.getVolume(volumeName); + BucketArgs bucketArgs = + BucketArgs.newBuilder().setBucketLayout(bucketLayout).build(); + volume.createBucket(bucketName, bucketArgs); + OzoneBucket bucket = volume.getBucket(bucketName); + + String keyName = UUID.randomUUID().toString(); + + Map tags = new HashMap<>(); + tags.put("tag-key-1", "tag-value-1"); + tags.put("tag-key-2", "tag-value-2"); + + OzoneOutputStream out = bucket.createKey(keyName, + value.getBytes(UTF_8).length, anyReplication(), new HashMap<>(), tags); + out.write(value.getBytes(UTF_8)); + out.close(); + + OzoneKey key = bucket.getKey(keyName); + assertEquals(tags.size(), key.getTags().size()); + + Map tagsRetrieved = bucket.getObjectTagging(keyName); + + assertEquals(tags.size(), tagsRetrieved.size()); + 
assertThat(tagsRetrieved).containsAllEntriesOf(tags); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java index 0481ee4a867..eafa193ae2b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java @@ -340,6 +340,9 @@ public void testKeyOps() throws Exception { long initialNumKeyLists = getLongCounter("NumKeyLists", omMetrics); long initialNumKeys = getLongCounter("NumKeys", omMetrics); long initialNumInitiateMultipartUploads = getLongCounter("NumInitiateMultipartUploads", omMetrics); + long initialNumGetObjectTagging = getLongCounter("NumGetObjectTagging", omMetrics); + long initialNumPutObjectTagging = getLongCounter("NumPutObjectTagging", omMetrics); + long initialNumDeleteObjectTagging = getLongCounter("NumDeleteObjectTagging", omMetrics); long initialEcKeyCreateTotal = getLongCounter("EcKeyCreateTotal", omMetrics); long initialNumKeyAllocateFails = getLongCounter("NumKeyAllocateFails", omMetrics); @@ -349,6 +352,9 @@ public void testKeyOps() throws Exception { long initialNumBlockAllocationFails = getLongCounter("NumBlockAllocationFails", omMetrics); long initialNumKeyListFails = getLongCounter("NumKeyListFails", omMetrics); long initialEcKeyCreateFailsTotal = getLongCounter("EcKeyCreateFailsTotal", omMetrics); + long initialNumGetObjectTaggingFails = getLongCounter("NumGetObjectTaggingFails", omMetrics); + long initialNumPutObjectTaggingFails = getLongCounter("NumPutObjectTaggingFails", omMetrics); + long initialNumDeleteObjectTaggingFails = getLongCounter("NumDeleteObjectTaggingFails", omMetrics); // see HDDS-10078 for making this work with FILE_SYSTEM_OPTIMIZED layout TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY); @@ -358,13 +364,16 
@@ public void testKeyOps() throws Exception { omMetrics = getMetrics("OMMetrics"); - assertEquals(initialNumKeyOps + 7, getLongCounter("NumKeyOps", omMetrics)); + assertEquals(initialNumKeyOps + 10, getLongCounter("NumKeyOps", omMetrics)); assertEquals(initialNumKeyAllocate + 1, getLongCounter("NumKeyAllocate", omMetrics)); assertEquals(initialNumKeyLookup + 1, getLongCounter("NumKeyLookup", omMetrics)); assertEquals(initialNumKeyDeletes + 1, getLongCounter("NumKeyDeletes", omMetrics)); assertEquals(initialNumKeyLists + 1, getLongCounter("NumKeyLists", omMetrics)); assertEquals(initialNumKeys, getLongCounter("NumKeys", omMetrics)); assertEquals(initialNumInitiateMultipartUploads + 1, getLongCounter("NumInitiateMultipartUploads", omMetrics)); + assertEquals(initialNumGetObjectTagging + 1, getLongCounter("NumGetObjectTagging", omMetrics)); + assertEquals(initialNumPutObjectTagging + 1, getLongCounter("NumPutObjectTagging", omMetrics)); + assertEquals(initialNumDeleteObjectTagging + 1, getLongCounter("NumDeleteObjectTagging", omMetrics)); keyArgs = createKeyArgs(volumeName, bucketName, new ECReplicationConfig("rs-3-2-1024K")); @@ -406,6 +415,7 @@ public void testKeyOps() throws Exception { doThrow(exception).when(mockKm).lookupKey(any(), any(), any()); doThrow(exception).when(mockKm).listKeys( any(), any(), any(), any(), anyInt()); + doThrow(exception).when(mockKm).getObjectTagging(any(), any()); OmMetadataReader omMetadataReader = (OmMetadataReader) ozoneManager.getOmMetadataReader().get(); HddsWhiteboxTestUtils.setInternalState( @@ -421,7 +431,7 @@ public void testKeyOps() throws Exception { doKeyOps(keyArgs); omMetrics = getMetrics("OMMetrics"); - assertEquals(initialNumKeyOps + 28, getLongCounter("NumKeyOps", omMetrics)); + assertEquals(initialNumKeyOps + 37, getLongCounter("NumKeyOps", omMetrics)); assertEquals(initialNumKeyAllocate + 6, getLongCounter("NumKeyAllocate", omMetrics)); assertEquals(initialNumKeyLookup + 3, getLongCounter("NumKeyLookup", 
omMetrics)); assertEquals(initialNumKeyDeletes + 4, getLongCounter("NumKeyDeletes", omMetrics)); @@ -435,6 +445,9 @@ public void testKeyOps() throws Exception { assertEquals(initialNumInitiateMultipartUploadFails + 1, getLongCounter( "NumInitiateMultipartUploadFails", omMetrics)); assertEquals(initialNumKeys + 2, getLongCounter("NumKeys", omMetrics)); + assertEquals(initialNumGetObjectTaggingFails + 1, getLongCounter("NumGetObjectTaggingFails", omMetrics)); + assertEquals(initialNumPutObjectTaggingFails + 1, getLongCounter("NumPutObjectTaggingFails", omMetrics)); + assertEquals(initialNumDeleteObjectTaggingFails + 1, getLongCounter("NumDeleteObjectTaggingFails", omMetrics)); keyArgs = createKeyArgs(volumeName, bucketName, new ECReplicationConfig("rs-3-2-1024K")); @@ -836,6 +849,21 @@ private void doKeyOps(OmKeyArgs keyArgs) { } catch (IOException ignored) { } + try { + writeClient.putObjectTagging(keyArgs); + } catch (IOException ignored) { + } + + try { + writeClient.getObjectTagging(keyArgs); + } catch (IOException ignored) { + } + + try { + writeClient.deleteObjectTagging(keyArgs); + } catch (IOException ignored) { + } + try { writeClient.deleteKey(keyArgs); } catch (IOException ignored) { diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index f71dc44fec5..9971506fa95 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -153,6 +153,9 @@ enum Type { GetQuotaRepairStatus = 135; StartQuotaRepair = 136; SnapshotMoveTableKeys = 137; + PutObjectTagging = 140; + GetObjectTagging = 141; + DeleteObjectTagging = 142; } enum SafeMode { @@ -292,9 +295,14 @@ message OMRequest { optional RenameSnapshotRequest RenameSnapshotRequest = 129; optional ListOpenFilesRequest ListOpenFilesRequest = 130; optional QuotaRepairRequest QuotaRepairRequest = 131; + optional 
GetQuotaRepairStatusRequest GetQuotaRepairStatusRequest = 133; optional StartQuotaRepairRequest StartQuotaRepairRequest = 134; optional SnapshotMoveTableKeysRequest SnapshotMoveTableKeysRequest = 135; + + optional GetObjectTaggingRequest getObjectTaggingRequest = 140; + optional PutObjectTaggingRequest putObjectTaggingRequest = 141; + optional DeleteObjectTaggingRequest deleteObjectTaggingRequest = 142; } message OMResponse { @@ -424,6 +432,10 @@ message OMResponse { optional QuotaRepairResponse QuotaRepairResponse = 134; optional GetQuotaRepairStatusResponse GetQuotaRepairStatusResponse = 136; optional StartQuotaRepairResponse StartQuotaRepairResponse = 137; + + optional GetObjectTaggingResponse getObjectTaggingResponse = 140; + optional PutObjectTaggingResponse putObjectTaggingResponse = 141; + optional DeleteObjectTaggingResponse deleteObjectTaggingResponse = 142; } enum Status { @@ -2259,6 +2271,28 @@ message OMLockDetailsProto { optional uint64 writeLockNanos = 4; } +message PutObjectTaggingRequest { + required KeyArgs keyArgs = 1; +} + +message PutObjectTaggingResponse { +} + +message GetObjectTaggingRequest { + required KeyArgs keyArgs = 1; +} + +message GetObjectTaggingResponse { + repeated hadoop.hdds.KeyValue tags = 1; +} + +message DeleteObjectTaggingRequest { + required KeyArgs keyArgs = 1; +} + +message DeleteObjectTaggingResponse { +} + /** The OM service that takes care of Ozone namespace. 
*/ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java index 081477adf4d..5fd9fd6d595 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java @@ -109,7 +109,11 @@ public enum OMAction implements AuditAction { UPGRADE_CANCEL, UPGRADE_FINALIZE, - LIST_OPEN_FILES; + LIST_OPEN_FILES, + + GET_OBJECT_TAGGING, + PUT_OBJECT_TAGGING, + DELETE_OBJECT_TAGGING; @Override public String getAction() { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java index a0f3053d731..9f6d8b81c10 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java @@ -39,6 +39,7 @@ import java.time.Duration; import java.util.ArrayList; import java.util.List; +import java.util.Map; /** * Handles key level commands. @@ -177,6 +178,17 @@ ExpiredOpenKeys getExpiredOpenKeys(Duration expireThreshold, int count, List getExpiredMultipartUploads( Duration expireThreshold, int maxParts) throws IOException; + /** + * Look up an existing key from the OM table and retrieve the tags from + * the key info. + * + * @param args the args of the key provided by client. + * @param bucket the resolved parent bucket of the key. + * @return Map of the tag set associated with the key. + * @throws IOException + */ + Map getObjectTagging(OmKeyArgs args, ResolvedBucket bucket) throws IOException; + /** * Returns the metadataManager. * @return OMMetadataManager. 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index e99bdea85ea..7532cf8b324 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -735,6 +735,16 @@ public List getExpiredMultipartUploads( maxParts); } + @Override + public Map getObjectTagging(OmKeyArgs args, ResolvedBucket bucket) throws IOException { + Preconditions.checkNotNull(args); + + OmKeyInfo value = captureLatencyNs(metrics.getLookupReadKeyInfoLatencyNs(), + () -> readKeyInfo(args, bucket.bucketLayout())); + + return value.getTags(); + } + @Override public OMMetadataManager getMetadataManager() { return metadataManager; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java index cbe5205c10b..de4241b7ac4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java @@ -103,6 +103,10 @@ public class OMMetrics implements OmMetadataReaderMetrics { private @Metric MutableCounterLong numSetTime; private @Metric MutableCounterLong numGetKeyInfo; + private @Metric MutableCounterLong numGetObjectTagging; + private @Metric MutableCounterLong numPutObjectTagging; + private @Metric MutableCounterLong numDeleteObjectTagging; + // Failure Metrics private @Metric MutableCounterLong numVolumeCreateFails; private @Metric MutableCounterLong numVolumeUpdateFails; @@ -184,6 +188,10 @@ public class OMMetrics implements OmMetadataReaderMetrics { private @Metric MutableCounterLong numListOpenFilesFails; private @Metric MutableCounterLong getNumGetKeyInfoFails; + private @Metric MutableCounterLong 
numGetObjectTaggingFails; + private @Metric MutableCounterLong numPutObjectTaggingFails; + private @Metric MutableCounterLong numDeleteObjectTaggingFails; + private @Metric MutableCounterLong numRecoverLeaseFails; // Metrics for total amount of data written @@ -913,6 +921,35 @@ public void incNumGetKeyInfoFails() { getNumGetKeyInfoFails.incr(); } + @Override + public void incNumGetObjectTagging() { + numGetObjectTagging.incr(); + numKeyOps.incr(); + } + + @Override + public void incNumGetObjectTaggingFails() { + numGetObjectTaggingFails.incr(); + } + + public void incNumPutObjectTagging() { + numPutObjectTagging.incr(); + numKeyOps.incr(); + } + + public void incNumPutObjectTaggingFails() { + numPutObjectTaggingFails.incr(); + } + + public void incNumDeleteObjectTagging() { + numDeleteObjectTagging.incr(); + numKeyOps.incr(); + } + + public void incNumDeleteObjectTaggingFails() { + numDeleteObjectTaggingFails.incr(); + } + @VisibleForTesting public long getNumVolumeCreates() { return numVolumeCreates.value(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java index a01855d1b63..fc1d9e0e96f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java @@ -144,6 +144,12 @@ public static void unregister() { @Metric(about = "readFromRockDb latency in listKeys") private MutableRate listKeysReadFromRocksDbLatencyNs; + @Metric(about = "resolveBucketLink latency in getObjectTagging") + private MutableRate getObjectTaggingResolveBucketLatencyNs; + + @Metric(about = "ACLs check in getObjectTagging") + private MutableRate getObjectTaggingAclCheckLatencyNs; + public void addLookupLatency(long latencyInNs) { lookupLatencyNs.add(latencyInNs); } @@ -248,7 +254,7 @@ public void 
setListKeysAveragePagination(long keyCount) { public void setListKeysOpsPerSec(float opsPerSec) { listKeysOpsPerSec.set(opsPerSec); } - + MutableRate getListKeysAclCheckLatencyNs() { return listKeysAclCheckLatencyNs; } @@ -280,4 +286,19 @@ public MutableRate getDeleteKeyResolveBucketAndAclCheckLatencyNs() { public void addListKeysReadFromRocksDbLatencyNs(long latencyInNs) { listKeysReadFromRocksDbLatencyNs.add(latencyInNs); } + + @Metric(about = "Overall getObjectTagging latency") + private MutableRate getObjectTaggingLatencyNs; + + public MutableRate getGetObjectTaggingResolveBucketLatencyNs() { + return getObjectTaggingResolveBucketLatencyNs; + } + + public MutableRate getGetObjectTaggingAclCheckLatencyNs() { + return getObjectTaggingAclCheckLatencyNs; + } + + public void addGetObjectTaggingLatencyNs(long latencyInNs) { + getObjectTaggingLatencyNs.add(latencyInNs); + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java index fdee1b71287..08f2115387e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java @@ -382,6 +382,7 @@ public ListKeysLightResult listKeysLight(String volumeName, * @param obj Ozone object. * @throws IOException if there is error.
*/ + @Override + public List getAcl(OzoneObj obj) throws IOException { String volumeName = obj.getVolumeName(); @@ -428,6 +429,45 @@ public List getAcl(OzoneObj obj) throws IOException { } } + @Override + public Map getObjectTagging(OmKeyArgs args) throws IOException { + long start = Time.monotonicNowNanos(); + + ResolvedBucket bucket = captureLatencyNs( + perfMetrics.getGetObjectTaggingResolveBucketLatencyNs(), + () -> ozoneManager.resolveBucketLink(args)); + + boolean auditSuccess = true; + Map auditMap = bucket.audit(args.toAuditMap()); + + OmKeyArgs resolvedArgs = bucket.update(args); + + try { + if (isAclEnabled) { + captureLatencyNs(perfMetrics.getGetObjectTaggingAclCheckLatencyNs(), + () -> checkAcls(ResourceType.KEY, StoreType.OZONE, + ACLType.READ, bucket, + args.getKeyName()) + ); + } + metrics.incNumGetObjectTagging(); + return keyManager.getObjectTagging(resolvedArgs, bucket); + } catch (Exception ex) { + metrics.incNumGetObjectTaggingFails(); + auditSuccess = false; + audit.logReadFailure(buildAuditMessageForFailure(OMAction.GET_OBJECT_TAGGING, + auditMap, ex)); + throw ex; + } finally { + if (auditSuccess) { + audit.logReadSuccess(buildAuditMessageForSuccess(OMAction.GET_OBJECT_TAGGING, + auditMap)); + } + + perfMetrics.addGetObjectTaggingLatencyNs(Time.monotonicNowNanos() - start); + } + } + /** + * Checks if current caller has acl permissions.
* diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReaderMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReaderMetrics.java index 21b2e8b990a..171242310a0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReaderMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReaderMetrics.java @@ -46,4 +46,8 @@ public interface OmMetadataReaderMetrics { void incNumKeyListFails(); void incNumGetAcl(); + + void incNumGetObjectTagging(); + + void incNumGetObjectTaggingFails(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshot.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshot.java index f863c086028..acb3a41e120 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshot.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshot.java @@ -47,6 +47,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.UUID; import java.util.stream.Collectors; @@ -185,6 +186,11 @@ public List getAcl(OzoneObj obj) throws IOException { return omMetadataReader.getAcl(normalizeOzoneObj(obj)); } + @Override + public Map getObjectTagging(OmKeyArgs args) throws IOException { + return omMetadataReader.getObjectTagging(normalizeOmKeyArgs(args)); + } + private OzoneObj normalizeOzoneObj(OzoneObj o) { if (o == null) { return null; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotMetrics.java index 7560d453eb9..d00b12e94ce 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotMetrics.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotMetrics.java @@ -162,5 +162,22 @@ public void incNumGetAcl() { MutableCounterLong numKeyOps; private @Metric MutableCounterLong numFSOps; + + + private @Metric + MutableCounterLong numGetObjectTagging; + private @Metric + MutableCounterLong numGetObjectTaggingFails; + + @Override + public void incNumGetObjectTagging() { + numGetObjectTagging.incr(); + numKeyOps.incr(); + } + + @Override + public void incNumGetObjectTaggingFails() { + numGetObjectTaggingFails.incr(); + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index b5ae80a02d8..705f7c9a01b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -4792,6 +4792,15 @@ public void startQuotaRepair(List buckets) throws IOException { new QuotaRepairTask(this).repair(buckets); } + @Override + public Map getObjectTagging(final OmKeyArgs args) + throws IOException { + try (ReferenceCounted rcReader = getReader(args)) { + return rcReader.get().getObjectTagging(args); + } + } + + /** * Write down Layout version of a finalized feature to DB on finalization. 
* @param lvm OMLayoutVersionManager diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/helpers/OMAuditLogger.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/helpers/OMAuditLogger.java index 18ee42756ef..491f2dadbf8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/helpers/OMAuditLogger.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/helpers/OMAuditLogger.java @@ -91,6 +91,9 @@ private static void init() { CMD_AUDIT_ACTION_MAP.put(Type.Prepare, OMAction.UPGRADE_PREPARE); CMD_AUDIT_ACTION_MAP.put(Type.CancelPrepare, OMAction.UPGRADE_CANCEL); CMD_AUDIT_ACTION_MAP.put(Type.FinalizeUpgrade, OMAction.UPGRADE_FINALIZE); + CMD_AUDIT_ACTION_MAP.put(Type.GetObjectTagging, OMAction.GET_OBJECT_TAGGING); + CMD_AUDIT_ACTION_MAP.put(Type.PutObjectTagging, OMAction.PUT_OBJECT_TAGGING); + CMD_AUDIT_ACTION_MAP.put(Type.DeleteObjectTagging, OMAction.DELETE_OBJECT_TAGGING); } private static OMAction getAction(OzoneManagerProtocolProtos.OMRequest request) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java index 30e14bc017e..dc634248c28 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java @@ -340,6 +340,16 @@ public static OMClientRequest createClientRequest(OMRequest omRequest, return new S3ExpiredMultipartUploadsAbortRequest(omRequest); case QuotaRepair: return new OMQuotaRepairRequest(omRequest); + case PutObjectTagging: + keyArgs = omRequest.getPutObjectTaggingRequest().getKeyArgs(); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + break; + case DeleteObjectTagging: + keyArgs 
= omRequest.getDeleteObjectTaggingRequest().getKeyArgs(); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + break; default: throw new OMException("Unrecognized write command type request " + cmdType, OMException.ResultCodes.INVALID_REQUEST); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/BucketLayoutAwareOMKeyRequestFactory.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/BucketLayoutAwareOMKeyRequestFactory.java index 4a5558ed7f1..5d542bfb912 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/BucketLayoutAwareOMKeyRequestFactory.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/BucketLayoutAwareOMKeyRequestFactory.java @@ -49,6 +49,10 @@ import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCommitPartRequestWithFSO; import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadAbortRequest; import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadAbortRequestWithFSO; +import org.apache.hadoop.ozone.om.request.s3.tagging.S3DeleteObjectTaggingRequest; +import org.apache.hadoop.ozone.om.request.s3.tagging.S3DeleteObjectTaggingRequestWithFSO; +import org.apache.hadoop.ozone.om.request.s3.tagging.S3PutObjectTaggingRequest; +import org.apache.hadoop.ozone.om.request.s3.tagging.S3PutObjectTaggingRequestWithFSO; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import jakarta.annotation.Nonnull; @@ -191,6 +195,23 @@ public final class BucketLayoutAwareOMKeyRequestFactory { addRequestClass(Type.SetTimes, OMKeySetTimesRequestWithFSO.class, BucketLayout.FILE_SYSTEM_OPTIMIZED); + + // PutObjectTagging + addRequestClass(Type.PutObjectTagging, + S3PutObjectTaggingRequest.class, + BucketLayout.OBJECT_STORE); + 
addRequestClass(Type.PutObjectTagging, + S3PutObjectTaggingRequestWithFSO.class, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + + // DeleteObjectTagging + addRequestClass(Type.DeleteObjectTagging, + S3DeleteObjectTaggingRequest.class, + BucketLayout.OBJECT_STORE); + addRequestClass(Type.DeleteObjectTagging, + S3DeleteObjectTaggingRequestWithFSO.class, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + } private BucketLayoutAwareOMKeyRequestFactory() { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequestUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequestUtils.java index 26487935a65..1b318354eeb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequestUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequestUtils.java @@ -99,4 +99,17 @@ private static boolean checkInSnapshotCache( } return false; } + + public static boolean shouldLogClientRequestFailure(IOException exception) { + if (!(exception instanceof OMException)) { + return true; + } + OMException omException = (OMException) exception; + switch (omException.getResult()) { + case KEY_NOT_FOUND: + return false; + default: + return true; + } + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java new file mode 100644 index 00000000000..6146e1ac105 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java @@ -0,0 +1,190 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.tagging; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMClientRequestUtils; +import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.s3.tagging.S3DeleteObjectTaggingResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteObjectTaggingRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteObjectTaggingResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; +import org.apache.ratis.server.protocol.TermIndex; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; + +import static 
org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; + +/** + * Handles delete object tagging request. + */ +public class S3DeleteObjectTaggingRequest extends OMKeyRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(S3DeleteObjectTaggingRequest.class); + + public S3DeleteObjectTaggingRequest(OMRequest omRequest, BucketLayout bucketLayout) { + super(omRequest, bucketLayout); + } + + @Override + public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { + DeleteObjectTaggingRequest deleteObjectTaggingRequest = + super.preExecute(ozoneManager).getDeleteObjectTaggingRequest(); + Preconditions.checkNotNull(deleteObjectTaggingRequest); + + KeyArgs keyArgs = deleteObjectTaggingRequest.getKeyArgs(); + + String keyPath = keyArgs.getKeyName(); + keyPath = validateAndNormalizeKey(ozoneManager.getEnableFileSystemPaths(), + keyPath, getBucketLayout()); + + KeyArgs.Builder newKeyArgs = + keyArgs.toBuilder() + .setKeyName(keyPath); + + KeyArgs resolvedArgs = resolveBucketAndCheckKeyAcls(newKeyArgs.build(), + ozoneManager, ACLType.WRITE); + return getOmRequest().toBuilder() + .setUserInfo(getUserInfo()) + .setDeleteObjectTaggingRequest( + deleteObjectTaggingRequest.toBuilder().setKeyArgs(resolvedArgs)) + .build(); + } + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + final long trxnLogIndex = termIndex.getIndex(); + + DeleteObjectTaggingRequest deleteObjectTaggingRequest = getOmRequest().getDeleteObjectTaggingRequest(); + + KeyArgs keyArgs = deleteObjectTaggingRequest.getKeyArgs(); + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumDeleteObjectTagging(); + + Map auditMap = buildKeyArgsAuditMap(keyArgs); + + 
OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + boolean acquiredLock = false; + OMClientResponse omClientResponse = null; + IOException exception = null; + Result result = null; + try { + mergeOmLockDetails( + omMetadataManager.getLock() + .acquireWriteLock(BUCKET_LOCK, volumeName, bucketName) + ); + acquiredLock = getOmLockDetails().isLockAcquired(); + + validateBucketAndVolume(omMetadataManager, volumeName, bucketName); + + String dbOzoneKey = + omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); + + OmKeyInfo omKeyInfo = + omMetadataManager.getKeyTable(getBucketLayout()).get(dbOzoneKey); + if (omKeyInfo == null) { + throw new OMException("Key not found", KEY_NOT_FOUND); + } + + // Clear / delete the tags + omKeyInfo.getTags().clear(); + // Set the UpdateID to the current transactionLogIndex + omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + + // Note: Key modification time is not changed because S3 last modified + // time only changes when there are changes in the object content + + // Update table cache + omMetadataManager.getKeyTable(getBucketLayout()).addCacheEntry( + new CacheKey<>(dbOzoneKey), + CacheValue.get(trxnLogIndex, omKeyInfo) + ); + + omClientResponse = new S3DeleteObjectTaggingResponse( + omResponse.setDeleteObjectTaggingResponse(DeleteObjectTaggingResponse.newBuilder()).build(), + omKeyInfo + ); + + result = Result.SUCCESS; + } catch (IOException ex) { + result = Result.FAILURE; + exception = ex; + omClientResponse = new S3DeleteObjectTaggingResponse( + createErrorOMResponse(omResponse, exception), + getBucketLayout() + ); + } finally { + if (acquiredLock) { + mergeOmLockDetails(omMetadataManager.getLock() + .releaseWriteLock(BUCKET_LOCK, volumeName, bucketName)); + } + if (omClientResponse != null) { + omClientResponse.setOmLockDetails(getOmLockDetails()); + } + } + + 
markForAudit(ozoneManager.getAuditLogger(), buildAuditMessage( + OMAction.DELETE_OBJECT_TAGGING, auditMap, exception, getOmRequest().getUserInfo() + )); + + switch (result) { + case SUCCESS: + LOG.debug("Delete object tagging success. Volume:{}, Bucket:{}, Key:{}.", volumeName, + bucketName, keyName); + break; + case FAILURE: + omMetrics.incNumDeleteObjectTaggingFails(); + if (OMClientRequestUtils.shouldLogClientRequestFailure(exception)) { + LOG.error("Delete object tagging failed. Volume:{}, Bucket:{}, Key:{}.", volumeName, + bucketName, keyName, exception); + } + break; + default: + LOG.error("Unrecognized Result for S3DeleteObjectTaggingRequest: {}", + deleteObjectTaggingRequest); + } + + return omClientResponse; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java new file mode 100644 index 00000000000..fb0561702a6 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java @@ -0,0 +1,169 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.tagging; + +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.om.request.file.OMFileRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.s3.tagging.S3DeleteObjectTaggingResponseWithFSO; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteObjectTaggingRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteObjectTaggingResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.ratis.server.protocol.TermIndex; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; + +/** + * Handles delete object tagging request 
for FSO bucket. + */ +public class S3DeleteObjectTaggingRequestWithFSO extends S3DeleteObjectTaggingRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(S3DeleteObjectTaggingRequestWithFSO.class); + + public S3DeleteObjectTaggingRequestWithFSO(OMRequest omRequest, + BucketLayout bucketLayout) { + super(omRequest, bucketLayout); + } + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + final long trxnLogIndex = termIndex.getIndex(); + + DeleteObjectTaggingRequest deleteObjectTaggingRequest = getOmRequest().getDeleteObjectTaggingRequest(); + + KeyArgs keyArgs = deleteObjectTaggingRequest.getKeyArgs(); + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumDeleteObjectTagging(); + + Map auditMap = buildKeyArgsAuditMap(keyArgs); + + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + boolean acquiredLock = false; + OMClientResponse omClientResponse = null; + IOException exception = null; + Result result = null; + try { + mergeOmLockDetails( + omMetadataManager.getLock() + .acquireWriteLock(BUCKET_LOCK, volumeName, bucketName) + ); + acquiredLock = getOmLockDetails().isLockAcquired(); + + validateBucketAndVolume(omMetadataManager, volumeName, bucketName); + + OzoneFileStatus keyStatus = OMFileRequest.getOMKeyInfoIfExists( + omMetadataManager, volumeName, bucketName, keyName, 0, + ozoneManager.getDefaultReplicationConfig()); + + if (keyStatus == null) { + throw new OMException("Key not found. 
Key: " + keyName, ResultCodes.KEY_NOT_FOUND); + } + + boolean isDirectory = keyStatus.isDirectory(); + + if (isDirectory) { + throw new OMException("DeleteObjectTagging is not currently supported for FSO directory", + ResultCodes.NOT_SUPPORTED_OPERATION); + } + + OmKeyInfo omKeyInfo = keyStatus.getKeyInfo(); + final long volumeId = omMetadataManager.getVolumeId(volumeName); + final long bucketId = omMetadataManager.getBucketId(volumeName, bucketName); + final String dbKey = omMetadataManager.getOzonePathKey(volumeId, bucketId, + omKeyInfo.getParentObjectID(), omKeyInfo.getFileName()); + + // Clear / delete the tags + omKeyInfo.getTags().clear(); + // Set the UpdateId to the current transactionLogIndex + omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + + // Note: Key modification time is not changed because S3 last modified + // time only changes when there are changes in the object content + + // Update table cache for file table. No need to check directory table since + // DeleteObjectTagging rejects operations on FSO directory + omMetadataManager.getKeyTable(getBucketLayout()) + .addCacheEntry(new CacheKey<>(dbKey), + CacheValue.get(trxnLogIndex, omKeyInfo)); + + omClientResponse = new S3DeleteObjectTaggingResponseWithFSO( + omResponse.setDeleteObjectTaggingResponse(DeleteObjectTaggingResponse.newBuilder()).build(), + omKeyInfo, volumeId, bucketId + ); + + result = Result.SUCCESS; + } catch (IOException ex) { + result = Result.FAILURE; + exception = ex; + omClientResponse = new S3DeleteObjectTaggingResponseWithFSO( + createErrorOMResponse(omResponse, exception), + getBucketLayout() + ); + } finally { + if (acquiredLock) { + mergeOmLockDetails(omMetadataManager.getLock() + .releaseWriteLock(BUCKET_LOCK, volumeName, bucketName)); + } + if (omClientResponse != null) { + omClientResponse.setOmLockDetails(getOmLockDetails()); + } + } + + switch (result) { + case SUCCESS: + LOG.debug("Delete object tagging success. 
Volume:{}, Bucket:{}, Key:{}.", volumeName, + bucketName, keyName); + break; + case FAILURE: + omMetrics.incNumDeleteObjectTaggingFails(); + LOG.error("Delete object tagging failed. Volume:{}, Bucket:{}, Key:{}.", volumeName, + bucketName, keyName, exception); + break; + default: + LOG.error("Unrecognized Result for S3DeleteObjectTaggingRequest: {}", + deleteObjectTaggingRequest); + } + + return omClientResponse; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java new file mode 100644 index 00000000000..aab67830383 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java @@ -0,0 +1,192 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.tagging; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMClientRequestUtils; +import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.s3.tagging.S3PutObjectTaggingResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingResponse; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; +import org.apache.ratis.server.protocol.TermIndex; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; 
+ +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; + +/** + * Handles put object tagging request. + */ +public class S3PutObjectTaggingRequest extends OMKeyRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(S3PutObjectTaggingRequest.class); + + public S3PutObjectTaggingRequest(OMRequest omRequest, BucketLayout bucketLayout) { + super(omRequest, bucketLayout); + } + + @Override + public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { + PutObjectTaggingRequest putObjectTaggingRequest = + super.preExecute(ozoneManager).getPutObjectTaggingRequest(); + Preconditions.checkNotNull(putObjectTaggingRequest); + + KeyArgs keyArgs = putObjectTaggingRequest.getKeyArgs(); + + String keyPath = keyArgs.getKeyName(); + keyPath = validateAndNormalizeKey(ozoneManager.getEnableFileSystemPaths(), + keyPath, getBucketLayout()); + + KeyArgs.Builder newKeyArgs = + keyArgs.toBuilder() + .setKeyName(keyPath); + + KeyArgs resolvedArgs = resolveBucketAndCheckKeyAcls(newKeyArgs.build(), + ozoneManager, ACLType.WRITE); + return getOmRequest().toBuilder() + .setUserInfo(getUserInfo()) + .setPutObjectTaggingRequest( + putObjectTaggingRequest.toBuilder().setKeyArgs(resolvedArgs)) + .build(); + } + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + final long trxnLogIndex = termIndex.getIndex(); + + PutObjectTaggingRequest putObjectTaggingRequest = getOmRequest().getPutObjectTaggingRequest(); + + KeyArgs keyArgs = putObjectTaggingRequest.getKeyArgs(); + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumPutObjectTagging(); + + Map auditMap = buildKeyArgsAuditMap(keyArgs); + + OMResponse.Builder omResponse = 
OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + boolean acquiredLock = false; + OMClientResponse omClientResponse = null; + IOException exception = null; + Result result = null; + try { + mergeOmLockDetails( + omMetadataManager.getLock() + .acquireWriteLock(BUCKET_LOCK, volumeName, bucketName) + ); + acquiredLock = getOmLockDetails().isLockAcquired(); + + validateBucketAndVolume(omMetadataManager, volumeName, bucketName); + + String dbOzoneKey = + omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); + + OmKeyInfo omKeyInfo = + omMetadataManager.getKeyTable(getBucketLayout()).get(dbOzoneKey); + if (omKeyInfo == null) { + throw new OMException("Key not found", KEY_NOT_FOUND); + } + + // Set the tags + omKeyInfo.getTags().clear(); + omKeyInfo.getTags().putAll(KeyValueUtil.getFromProtobuf(keyArgs.getTagsList())); + // Set the UpdateID to the current transactionLogIndex + omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + + // Note: Key modification time is not changed because S3 last modified + // time only changes when there are changes in the object content + + // Update table cache + omMetadataManager.getKeyTable(getBucketLayout()).addCacheEntry( + new CacheKey<>(dbOzoneKey), + CacheValue.get(trxnLogIndex, omKeyInfo) + ); + + omClientResponse = new S3PutObjectTaggingResponse( + omResponse.setPutObjectTaggingResponse(PutObjectTaggingResponse.newBuilder()).build(), + omKeyInfo + ); + + result = Result.SUCCESS; + } catch (IOException ex) { + result = Result.FAILURE; + exception = ex; + omClientResponse = new S3PutObjectTaggingResponse( + createErrorOMResponse(omResponse, exception), + getBucketLayout() + ); + } finally { + if (acquiredLock) { + mergeOmLockDetails(omMetadataManager.getLock() + .releaseWriteLock(BUCKET_LOCK, volumeName, bucketName)); + } + if (omClientResponse != null) { + omClientResponse.setOmLockDetails(getOmLockDetails()); + } + } + + 
markForAudit(ozoneManager.getAuditLogger(), buildAuditMessage( + OMAction.PUT_OBJECT_TAGGING, auditMap, exception, getOmRequest().getUserInfo() + )); + + switch (result) { + case SUCCESS: + LOG.debug("Put object tagging success. Volume:{}, Bucket:{}, Key:{}.", volumeName, + bucketName, keyName); + break; + case FAILURE: + omMetrics.incNumPutObjectTaggingFails(); + if (OMClientRequestUtils.shouldLogClientRequestFailure(exception)) { + LOG.error("Put object tagging failed. Volume:{}, Bucket:{}, Key:{}.", volumeName, + bucketName, keyName, exception); + } + break; + default: + LOG.error("Unrecognized Result for S3PutObjectTaggingRequest: {}", + putObjectTaggingRequest); + } + + return omClientResponse; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java new file mode 100644 index 00000000000..2b6ca8601cb --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java @@ -0,0 +1,171 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.tagging; + +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.om.request.file.OMFileRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.s3.tagging.S3PutObjectTaggingResponseWithFSO; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingResponse; +import org.apache.ratis.server.protocol.TermIndex; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; + 
+/** + * Handles put object tagging request for FSO bucket. + */ +public class S3PutObjectTaggingRequestWithFSO extends S3PutObjectTaggingRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(S3PutObjectTaggingRequestWithFSO.class); + + public S3PutObjectTaggingRequestWithFSO(OMRequest omRequest, + BucketLayout bucketLayout) { + super(omRequest, bucketLayout); + } + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + final long trxnLogIndex = termIndex.getIndex(); + + PutObjectTaggingRequest putObjectTaggingRequest = getOmRequest().getPutObjectTaggingRequest(); + + KeyArgs keyArgs = putObjectTaggingRequest.getKeyArgs(); + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumPutObjectTagging(); + + Map auditMap = buildKeyArgsAuditMap(keyArgs); + + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + boolean acquiredLock = false; + OMClientResponse omClientResponse = null; + IOException exception = null; + Result result = null; + try { + mergeOmLockDetails( + omMetadataManager.getLock() + .acquireWriteLock(BUCKET_LOCK, volumeName, bucketName) + ); + acquiredLock = getOmLockDetails().isLockAcquired(); + + validateBucketAndVolume(omMetadataManager, volumeName, bucketName); + + OzoneFileStatus keyStatus = OMFileRequest.getOMKeyInfoIfExists( + omMetadataManager, volumeName, bucketName, keyName, 0, + ozoneManager.getDefaultReplicationConfig()); + + if (keyStatus == null) { + throw new OMException("Key not found. 
Key: " + keyName, ResultCodes.KEY_NOT_FOUND); + } + + boolean isDirectory = keyStatus.isDirectory(); + + if (isDirectory) { + throw new OMException("PutObjectTagging is not currently supported for FSO directory", + ResultCodes.NOT_SUPPORTED_OPERATION); + } + + OmKeyInfo omKeyInfo = keyStatus.getKeyInfo(); + final long volumeId = omMetadataManager.getVolumeId(volumeName); + final long bucketId = omMetadataManager.getBucketId(volumeName, bucketName); + final String dbKey = omMetadataManager.getOzonePathKey(volumeId, bucketId, + omKeyInfo.getParentObjectID(), omKeyInfo.getFileName()); + + // Set the tags + omKeyInfo.getTags().clear(); + omKeyInfo.getTags().putAll(KeyValueUtil.getFromProtobuf(keyArgs.getTagsList())); + // Set the UpdateId to the current transactionLogIndex + omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + + // Note: Key modification time is not changed because S3 last modified + // time only changes when there are changes in the object content + + // Update table cache for file table. 
No need to check directory table since + // PutObjectTagging rejects operations on FSO directory + omMetadataManager.getKeyTable(getBucketLayout()) + .addCacheEntry(new CacheKey<>(dbKey), + CacheValue.get(trxnLogIndex, omKeyInfo)); + + omClientResponse = new S3PutObjectTaggingResponseWithFSO( + omResponse.setPutObjectTaggingResponse(PutObjectTaggingResponse.newBuilder()).build(), + omKeyInfo, volumeId, bucketId + ); + + result = Result.SUCCESS; + } catch (IOException ex) { + result = Result.FAILURE; + exception = ex; + omClientResponse = new S3PutObjectTaggingResponseWithFSO( + createErrorOMResponse(omResponse, exception), + getBucketLayout() + ); + } finally { + if (acquiredLock) { + mergeOmLockDetails(omMetadataManager.getLock() + .releaseWriteLock(BUCKET_LOCK, volumeName, bucketName)); + } + if (omClientResponse != null) { + omClientResponse.setOmLockDetails(getOmLockDetails()); + } + } + + switch (result) { + case SUCCESS: + LOG.debug("Put object tagging success. Volume:{}, Bucket:{}, Key:{}.", volumeName, + bucketName, keyName); + break; + case FAILURE: + omMetrics.incNumPutObjectTaggingFails(); + LOG.error("Put object tagging failed. Volume:{}, Bucket:{}, Key:{}.", volumeName, + bucketName, keyName, exception); + break; + default: + LOG.error("Unrecognized Result for S3PutObjectTaggingRequest: {}", + putObjectTaggingRequest); + } + + return omClientResponse; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/package-info.java new file mode 100644 index 00000000000..d3f26d195ad --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/package-info.java @@ -0,0 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +/** + * Package contains classes related to S3 tagging requests. + */ +package org.apache.hadoop.ozone.om.request.s3.tagging; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3DeleteObjectTaggingResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3DeleteObjectTaggingResponse.java new file mode 100644 index 00000000000..10181c9468f --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3DeleteObjectTaggingResponse.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.tagging; + +import jakarta.annotation.Nonnull; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.om.response.key.OmKeyResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; + +import java.io.IOException; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; + +/** + * Response for delete object tagging request. + */ +@CleanupTableInfo(cleanupTables = {KEY_TABLE}) +public class S3DeleteObjectTaggingResponse extends OmKeyResponse { + + private OmKeyInfo omKeyInfo; + + public S3DeleteObjectTaggingResponse(@Nonnull OMResponse omResponse, + @Nonnull OmKeyInfo omKeyInfo) { + super(omResponse); + this.omKeyInfo = omKeyInfo; + } + + /** + * For when the request is not successful. + * For a successful request, the other constructor should be used. 
+ */ + public S3DeleteObjectTaggingResponse(@Nonnull OMResponse omResponse, + @Nonnull BucketLayout bucketLayout) { + super(omResponse, bucketLayout); + checkStatusNotOK(); + } + + @Override + protected void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + omMetadataManager.getKeyTable(getBucketLayout()).putWithBatch(batchOperation, + omMetadataManager.getOzoneKey( + omKeyInfo.getVolumeName(), + omKeyInfo.getBucketName(), + omKeyInfo.getKeyName()), + omKeyInfo + ); + } + + protected OmKeyInfo getOmKeyInfo() { + return omKeyInfo; + } + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3DeleteObjectTaggingResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3DeleteObjectTaggingResponseWithFSO.java new file mode 100644 index 00000000000..bb42668ad05 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3DeleteObjectTaggingResponseWithFSO.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.tagging; + +import jakarta.annotation.Nonnull; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; + +import java.io.IOException; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; + +/** + * Response for delete object tagging request for FSO bucket. + */ +@CleanupTableInfo(cleanupTables = {FILE_TABLE}) +public class S3DeleteObjectTaggingResponseWithFSO extends S3DeleteObjectTaggingResponse { + + private long volumeId; + private long bucketId; + + public S3DeleteObjectTaggingResponseWithFSO(@Nonnull OMResponse omResponse, + @Nonnull OmKeyInfo omKeyInfo, + @Nonnull long volumeId, + @Nonnull long bucketId) { + super(omResponse, omKeyInfo); + this.volumeId = volumeId; + this.bucketId = bucketId; + } + + /** + * For when the request is not successful. + * For a successful request, the other constructor should be used. 
+ */ + public S3DeleteObjectTaggingResponseWithFSO(@Nonnull OMResponse omResponse, + @Nonnull BucketLayout bucketLayout) { + super(omResponse, bucketLayout); + } + + @Override + protected void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + String ozoneDbKey = omMetadataManager.getOzonePathKey(volumeId, bucketId, + getOmKeyInfo().getParentObjectID(), getOmKeyInfo().getFileName()); + omMetadataManager.getKeyTable(getBucketLayout()) + .putWithBatch(batchOperation, ozoneDbKey, getOmKeyInfo()); + } + + @Override + public BucketLayout getBucketLayout() { + return BucketLayout.FILE_SYSTEM_OPTIMIZED; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3PutObjectTaggingResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3PutObjectTaggingResponse.java new file mode 100644 index 00000000000..2acefe2ec6e --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3PutObjectTaggingResponse.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.tagging; + +import jakarta.annotation.Nonnull; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.om.response.key.OmKeyResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; + +import java.io.IOException; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; + +/** + * Response for put object tagging request. + */ +@CleanupTableInfo(cleanupTables = {KEY_TABLE}) +public class S3PutObjectTaggingResponse extends OmKeyResponse { + + private OmKeyInfo omKeyInfo; + + public S3PutObjectTaggingResponse(@Nonnull OMResponse omResponse, + @Nonnull OmKeyInfo omKeyinfo) { + super(omResponse); + this.omKeyInfo = omKeyinfo; + } + + /** + * For when the request is not successful. + * For a successful request, the other constructor should be used. 
+ */ + public S3PutObjectTaggingResponse(@Nonnull OMResponse omResponse, + @Nonnull BucketLayout bucketLayout) { + super(omResponse, bucketLayout); + checkStatusNotOK(); + } + + @Override + protected void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + omMetadataManager.getKeyTable(getBucketLayout()).putWithBatch(batchOperation, + omMetadataManager.getOzoneKey( + omKeyInfo.getVolumeName(), + omKeyInfo.getBucketName(), + omKeyInfo.getKeyName()), + omKeyInfo + ); + } + + protected OmKeyInfo getOmKeyInfo() { + return omKeyInfo; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3PutObjectTaggingResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3PutObjectTaggingResponseWithFSO.java new file mode 100644 index 00000000000..6152fbabe89 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/S3PutObjectTaggingResponseWithFSO.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.tagging; + +import jakarta.annotation.Nonnull; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; + +import java.io.IOException; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; + +/** + * Response for put object tagging request for FSO bucket. + */ +@CleanupTableInfo(cleanupTables = {FILE_TABLE}) +public class S3PutObjectTaggingResponseWithFSO extends S3PutObjectTaggingResponse { + + private long volumeId; + private long bucketId; + + public S3PutObjectTaggingResponseWithFSO(@Nonnull OMResponse omResponse, + @Nonnull OmKeyInfo omKeyInfo, + @Nonnull long volumeId, + @Nonnull long bucketId) { + super(omResponse, omKeyInfo); + this.volumeId = volumeId; + this.bucketId = bucketId; + } + + /** + * For when the request is not successful. + * For a successful request, the other constructor should be used. 
+ */ + public S3PutObjectTaggingResponseWithFSO(@Nonnull OMResponse omResponse, + @Nonnull BucketLayout bucketLayout) { + super(omResponse, bucketLayout); + } + + @Override + protected void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + String ozoneDbKey = omMetadataManager.getOzonePathKey(volumeId, bucketId, + getOmKeyInfo().getParentObjectID(), getOmKeyInfo().getFileName()); + omMetadataManager.getKeyTable(getBucketLayout()) + .putWithBatch(batchOperation, ozoneDbKey, getOmKeyInfo()); + } + + @Override + public BucketLayout getBucketLayout() { + return BucketLayout.FILE_SYSTEM_OPTIMIZED; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/package-info.java new file mode 100644 index 00000000000..9a104c4663a --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tagging/package-info.java @@ -0,0 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +/** + * Package contains classes related to S3 tagging responses. + */ +package org.apache.hadoop.ozone.om.response.s3.tagging; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index 5682b040e85..594b862bd1f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -21,6 +21,7 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.UUID; import java.util.stream.Collectors; @@ -42,6 +43,9 @@ import org.apache.hadoop.hdds.utils.FaultInjector; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.helpers.OMAuditLogger; +import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetObjectTaggingRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetObjectTaggingResponse; import org.apache.hadoop.ozone.util.PayloadUtils; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.OzoneManagerPrepareState; @@ -385,6 +389,11 @@ public OMResponse handleReadRequest(OMRequest request) { startQuotaRepair(request.getStartQuotaRepairRequest()); responseBuilder.setStartQuotaRepairResponse(startQuotaRepairRsp); break; + case GetObjectTagging: + 
OzoneManagerProtocolProtos.GetObjectTaggingResponse getObjectTaggingResponse = + getObjectTagging(request.getGetObjectTaggingRequest()); + responseBuilder.setGetObjectTaggingResponse(getObjectTaggingResponse); + break; default: responseBuilder.setSuccess(false); responseBuilder.setMessage("Unrecognized Command Type: " + cmdType); @@ -1508,6 +1517,24 @@ private SetSafeModeResponse setSafeMode( .build(); } + private GetObjectTaggingResponse getObjectTagging(GetObjectTaggingRequest request) + throws IOException { + KeyArgs keyArgs = request.getKeyArgs(); + OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() + .setVolumeName(keyArgs.getVolumeName()) + .setBucketName(keyArgs.getBucketName()) + .setKeyName(keyArgs.getKeyName()) + .build(); + + GetObjectTaggingResponse.Builder resp = + GetObjectTaggingResponse.newBuilder(); + + Map result = impl.getObjectTagging(omKeyArgs); + + resp.addAllTags(KeyValueUtil.toProtobuf(result)); + return resp.build(); + } + private SafeModeAction toSafeModeAction( OzoneManagerProtocolProtos.SafeMode safeMode) { switch (safeMode) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestBucketLayoutAwareOMKeyFactory.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestBucketLayoutAwareOMKeyFactory.java index bb3e3930059..c0f63e4d559 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestBucketLayoutAwareOMKeyFactory.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestBucketLayoutAwareOMKeyFactory.java @@ -110,9 +110,9 @@ public void testGetRequestInstanceFromMap() { LOG.info("Validated request class instantiation for cmdType " + k); }); - assertEquals(13, omKeyReqsFSO.size()); - assertEquals(14, omKeyReqsLegacy.size()); - assertEquals(14, omKeyReqsOBS.size()); + assertEquals(15, omKeyReqsFSO.size()); + assertEquals(16, omKeyReqsLegacy.size()); + assertEquals(16, omKeyReqsOBS.size()); // Check if the 
number of instantiated OMKeyRequest classes is equal to // the number of keys in the mapping. assertEquals( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3DeleteObjectTaggingRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3DeleteObjectTaggingRequest.java new file mode 100644 index 00000000000..9c307d85671 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3DeleteObjectTaggingRequest.java @@ -0,0 +1,202 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.tagging; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.request.key.TestOMKeyRequest; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteObjectTaggingRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; +import org.junit.jupiter.api.Test; + +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; + +/** + * Test delete object tagging request. 
+ */ +public class TestS3DeleteObjectTaggingRequest extends TestOMKeyRequest { + + @Test + public void testPreExecute() throws Exception { + doPreExecute(volumeName, bucketName, keyName); + } + + @Test + public void testValidateAndUpdateCacheSuccess() throws Exception { + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); + Map tags = getTags(5); + String ozoneKey = addKeyToTable(tags); + + OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertNotNull(omKeyInfo); + assertEquals(tags.size(), omKeyInfo.getTags().size()); + + OMRequest originalRequest = createDeleteObjectTaggingRequest(volumeName, bucketName, keyName); + + S3DeleteObjectTaggingRequest request = getDeleteObjectTaggingRequest(originalRequest); + + OMRequest modifiedRequest = request.preExecute(ozoneManager); + + request = getDeleteObjectTaggingRequest(modifiedRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + OMResponse omResponse = omClientResponse.getOMResponse(); + + assertNotNull(omResponse.getDeleteObjectTaggingResponse()); + assertEquals(OzoneManagerProtocolProtos.Status.OK, omResponse.getStatus()); + assertEquals(Type.DeleteObjectTagging, omResponse.getCmdType()); + + OmKeyInfo updatedKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertNotNull(updatedKeyInfo); + assertEquals(omKeyInfo.getVolumeName(), updatedKeyInfo.getVolumeName()); + assertEquals(omKeyInfo.getBucketName(), updatedKeyInfo.getBucketName()); + assertEquals(omKeyInfo.getKeyName(), updatedKeyInfo.getKeyName()); + assertEquals(0, updatedKeyInfo.getTags().size()); + } + + @Test + public void testValidateAndUpdateCacheVolumeNotFound() throws Exception { + OMRequest modifiedOmRequest = + doPreExecute(volumeName, bucketName, keyName); + + S3DeleteObjectTaggingRequest request = getDeleteObjectTaggingRequest(modifiedOmRequest); + + OMClientResponse omClientResponse 
= + request.validateAndUpdateCache(ozoneManager, 2L); + + assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, + omClientResponse.getOMResponse().getStatus()); + } + + @Test + public void testValidateAndUpdateCacheBucketNotFound() throws Exception { + OMRequestTestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE, omMetadataManager); + + OMRequest modifiedOmRequest = + doPreExecute(volumeName, bucketName, keyName); + + S3DeleteObjectTaggingRequest request = getDeleteObjectTaggingRequest(modifiedOmRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + + assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND, + omClientResponse.getOMResponse().getStatus()); + } + + @Test + public void testValidateAndUpdateCacheKeyNotFound() throws Exception { + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); + + OMRequest modifiedOmRequest = + doPreExecute(volumeName, bucketName, keyName); + + S3DeleteObjectTaggingRequest request = getDeleteObjectTaggingRequest(modifiedOmRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + + assertEquals(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND, + omClientResponse.getOMResponse().getStatus()); + } + + protected OMRequest doPreExecute(String volumeName, String bucketName, + String keyName) throws Exception { + OMRequest originalRequest = createDeleteObjectTaggingRequest( + volumeName, bucketName, keyName); + + S3DeleteObjectTaggingRequest request = getDeleteObjectTaggingRequest(originalRequest); + + OMRequest modifiedRequest = request.preExecute(ozoneManager); + verifyRequest(modifiedRequest, originalRequest); + + return modifiedRequest; + } + + public OMRequest createDeleteObjectTaggingRequest(String volumeName, + String bucketName, + String keyName) { + KeyArgs.Builder keyArgs = KeyArgs.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + 
.setKeyName(keyName); + + + DeleteObjectTaggingRequest deleteObjectTaggingRequest = + DeleteObjectTaggingRequest.newBuilder() + .setKeyArgs(keyArgs) + .build(); + + return OMRequest.newBuilder() + .setDeleteObjectTaggingRequest(deleteObjectTaggingRequest) + .setCmdType(Type.DeleteObjectTagging) + .setClientId(UUID.randomUUID().toString()) + .build(); + } + + private void verifyRequest(OMRequest modifiedRequest, OMRequest originalRequest) { + + KeyArgs original = originalRequest.getDeleteObjectTaggingRequest().getKeyArgs(); + + KeyArgs updated = modifiedRequest.getDeleteObjectTaggingRequest().getKeyArgs(); + + assertEquals(original.getVolumeName(), updated.getVolumeName()); + assertEquals(original.getBucketName(), updated.getBucketName()); + assertEquals(original.getKeyName(), updated.getKeyName()); + assertEquals(original.getTagsList(), updated.getTagsList()); + // Modification time will not be set for object tagging request + assertFalse(updated.hasModificationTime()); + } + + protected String addKeyToTable(Map tags) throws Exception { + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)) + .addAllTags(tags) + .build(); + OMRequestTestUtils.addKeyToTable(false, false, omKeyInfo, + clientID, 1L, omMetadataManager); + return omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); + } + + protected S3DeleteObjectTaggingRequest getDeleteObjectTaggingRequest(OMRequest originalRequest) { + return new S3DeleteObjectTaggingRequest(originalRequest, getBucketLayout()); + } + + protected Map getTags(int size) { + Map tags = new HashMap<>(); + for (int i = 0; i < size; i++) { + tags.put("tag-key-" + UUID.randomUUID(), "tag-value-" + UUID.randomUUID()); + } + return tags; + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3DeleteObjectTaggingRequestWithFSO.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3DeleteObjectTaggingRequestWithFSO.java new file mode 100644 index 00000000000..ca3010a9b29 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3DeleteObjectTaggingRequestWithFSO.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.tagging; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; + +import java.util.Map; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; + +/** + * Test delete object tagging request for FSO bucket. + */ +public class TestS3DeleteObjectTaggingRequestWithFSO extends TestS3DeleteObjectTaggingRequest { + + private static final String PARENT_DIR = "c/d/e"; + private static final String FILE_NAME = "file1"; + private static final String FILE_KEY = PARENT_DIR + "/" + FILE_NAME; + + @Override + protected String addKeyToTable(Map tags) throws Exception { + keyName = FILE_KEY; // updated key name + + // Create parent dirs for the path + long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName, + bucketName, PARENT_DIR, omMetadataManager); + + OmKeyInfo omKeyInfo = + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, FILE_KEY, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(1L) + .addAllTags(tags) + .build(); + OMRequestTestUtils.addFileToKeyTable(false, false, + FILE_NAME, omKeyInfo, -1, 50, omMetadataManager); + final long volumeId = omMetadataManager.getVolumeId( + omKeyInfo.getVolumeName()); + final long bucketId = omMetadataManager.getBucketId( + omKeyInfo.getVolumeName(), omKeyInfo.getBucketName()); + return 
omMetadataManager.getOzonePathKey( + volumeId, bucketId, omKeyInfo.getParentObjectID(), + omKeyInfo.getFileName()); + } + + @Override + public BucketLayout getBucketLayout() { + return BucketLayout.FILE_SYSTEM_OPTIMIZED; + } + + @Override + protected S3DeleteObjectTaggingRequest getDeleteObjectTaggingRequest(OMRequest originalRequest) { + return new S3DeleteObjectTaggingRequestWithFSO(originalRequest, getBucketLayout()); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3PutObjectTaggingRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3PutObjectTaggingRequest.java new file mode 100644 index 00000000000..c70c2587332 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3PutObjectTaggingRequest.java @@ -0,0 +1,254 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.tagging; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.request.key.TestOMKeyRequest; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; +import org.junit.jupiter.api.Test; + +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * Test put object tagging request. 
+ */ +public class TestS3PutObjectTaggingRequest extends TestOMKeyRequest { + + @Test + public void testPreExecute() throws Exception { + Map tags = new HashMap<>(); + tags.putAll(getTags(2)); + doPreExecute(volumeName, bucketName, keyName, tags); + } + + @Test + public void testValidateAndUpdateCacheSuccess() throws Exception { + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); + String ozoneKey = addKeyToTable(); + + OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + + assertNotNull(omKeyInfo); + assertTrue(omKeyInfo.getTags().isEmpty()); + + Map tags = getTags(5); + + OMRequest originalRequest = createPutObjectTaggingRequest(volumeName, bucketName, keyName, tags); + + S3PutObjectTaggingRequest request = getPutObjectTaggingRequest(originalRequest); + + OMRequest modifiedRequest = request.preExecute(ozoneManager); + + request = getPutObjectTaggingRequest(modifiedRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + OMResponse omResponse = omClientResponse.getOMResponse(); + + assertNotNull(omResponse.getPutObjectTaggingResponse()); + assertEquals(OzoneManagerProtocolProtos.Status.OK, omResponse.getStatus()); + assertEquals(Type.PutObjectTagging, omResponse.getCmdType()); + + OmKeyInfo updatedKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertNotNull(updatedKeyInfo); + assertEquals(omKeyInfo.getVolumeName(), updatedKeyInfo.getVolumeName()); + assertEquals(omKeyInfo.getBucketName(), updatedKeyInfo.getBucketName()); + assertEquals(omKeyInfo.getKeyName(), updatedKeyInfo.getKeyName()); + assertEquals(tags.size(), updatedKeyInfo.getTags().size()); + for (Map.Entry tag: tags.entrySet()) { + String value = updatedKeyInfo.getTags().get(tag.getKey()); + assertNotNull(value); + assertEquals(tag.getValue(), value); + } + } + + @Test + public void testValidateAndUpdateCacheVolumeNotFound() throws Exception { + 
OMRequest modifiedOmRequest = + doPreExecute(volumeName, bucketName, keyName, getTags(2)); + + S3PutObjectTaggingRequest request = getPutObjectTaggingRequest(modifiedOmRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + + assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, + omClientResponse.getOMResponse().getStatus()); + } + + @Test + public void testValidateAndUpdateCacheBucketNotFound() throws Exception { + OMRequestTestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE, omMetadataManager); + + OMRequest modifiedOmRequest = + doPreExecute(volumeName, bucketName, keyName, getTags(2)); + + S3PutObjectTaggingRequest request = getPutObjectTaggingRequest(modifiedOmRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + + assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND, + omClientResponse.getOMResponse().getStatus()); + } + + @Test + public void testValidateAndUpdateCacheKeyNotFound() throws Exception { + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); + + OMRequest modifiedOmRequest = + doPreExecute(volumeName, bucketName, keyName, getTags(2)); + + S3PutObjectTaggingRequest request = getPutObjectTaggingRequest(modifiedOmRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + + assertEquals(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND, + omClientResponse.getOMResponse().getStatus()); + } + + @Test + public void testValidateAndUpdateCacheEmptyTagSet() throws Exception { + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); + String ozoneKey = addKeyToTable(); + + OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertNotNull(omKeyInfo); + assertTrue(omKeyInfo.getTags().isEmpty()); + + Map tags = getTags(0); + + OMRequest originalRequest = 
createPutObjectTaggingRequest(volumeName, bucketName, keyName, tags); + + S3PutObjectTaggingRequest request = getPutObjectTaggingRequest(originalRequest); + + OMRequest modifiedRequest = request.preExecute(ozoneManager); + + request = getPutObjectTaggingRequest(modifiedRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 1L); + OMResponse omResponse = omClientResponse.getOMResponse(); + + assertNotNull(omResponse.getPutObjectTaggingResponse()); + assertEquals(OzoneManagerProtocolProtos.Status.OK, omResponse.getStatus()); + assertEquals(Type.PutObjectTagging, omResponse.getCmdType()); + + OmKeyInfo updatedKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertEquals(omKeyInfo.getVolumeName(), updatedKeyInfo.getVolumeName()); + assertEquals(omKeyInfo.getBucketName(), updatedKeyInfo.getBucketName()); + assertEquals(omKeyInfo.getKeyName(), updatedKeyInfo.getKeyName()); + assertTrue(omKeyInfo.getTags().isEmpty()); + assertEquals(tags.size(), updatedKeyInfo.getTags().size()); + } + + + protected OMRequest doPreExecute(String volumeName, + String bucketName, + String keyName, + Map tags) throws Exception { + OMRequest originalRequest = createPutObjectTaggingRequest( + volumeName, bucketName, keyName, tags); + + S3PutObjectTaggingRequest request = getPutObjectTaggingRequest(originalRequest); + + OMRequest modifiedRequest = request.preExecute(ozoneManager); + verifyRequest(modifiedRequest, originalRequest); + + return modifiedRequest; + } + + private OMRequest createPutObjectTaggingRequest(String volumeName, + String bucketName, + String keyName, + Map tags) { + KeyArgs.Builder keyArgs = KeyArgs.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName); + + if (tags != null && !tags.isEmpty()) { + keyArgs.addAllTags(KeyValueUtil.toProtobuf(tags)); + } + + PutObjectTaggingRequest putObjectTaggingRequest = + PutObjectTaggingRequest.newBuilder() + .setKeyArgs(keyArgs) + 
.build(); + + return OMRequest.newBuilder() + .setPutObjectTaggingRequest(putObjectTaggingRequest) + .setCmdType(Type.PutObjectTagging) + .setClientId(UUID.randomUUID().toString()) + .build(); + } + + private void verifyRequest(OMRequest modifiedRequest, OMRequest originalRequest) { + + KeyArgs original = originalRequest.getPutObjectTaggingRequest().getKeyArgs(); + + KeyArgs updated = modifiedRequest.getPutObjectTaggingRequest().getKeyArgs(); + + assertEquals(original.getVolumeName(), updated.getVolumeName()); + assertEquals(original.getBucketName(), updated.getBucketName()); + assertEquals(original.getKeyName(), updated.getKeyName()); + assertEquals(original.getTagsList(), updated.getTagsList()); + // Modification time will not be set for object tagging request + assertFalse(updated.hasModificationTime()); + } + + protected String addKeyToTable() throws Exception { + OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucketName, + keyName, clientID, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), 1L, + omMetadataManager); + + return omMetadataManager.getOzoneKey(volumeName, bucketName, + keyName); + } + + protected S3PutObjectTaggingRequest getPutObjectTaggingRequest(OMRequest originalRequest) { + return new S3PutObjectTaggingRequest(originalRequest, getBucketLayout()); + } + + protected Map getTags(int size) { + Map tags = new HashMap<>(); + for (int i = 0; i < size; i++) { + tags.put("tag-key-" + UUID.randomUUID(), "tag-value-" + UUID.randomUUID()); + } + return tags; + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3PutObjectTaggingRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3PutObjectTaggingRequestWithFSO.java new file mode 100644 index 00000000000..38ea5facad2 --- /dev/null +++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/tagging/TestS3PutObjectTaggingRequestWithFSO.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.request.s3.tagging; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.junit.jupiter.api.Test; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * Test put object tagging request for FSO bucket. 
+ */ +public class TestS3PutObjectTaggingRequestWithFSO extends TestS3PutObjectTaggingRequest { + + private static final String PARENT_DIR = "c/d/e"; + private static final String FILE_NAME = "file1"; + private static final String FILE_KEY = PARENT_DIR + "/" + FILE_NAME; + + @Test + public void testValidateAndUpdateCachePutObjectTaggingToDir() throws Exception { + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); + + addKeyToTable(); + + OMRequest modifiedOmRequest = + doPreExecute(volumeName, bucketName, PARENT_DIR, getTags(2)); + + S3PutObjectTaggingRequest request = getPutObjectTaggingRequest(modifiedOmRequest); + + OMClientResponse omClientResponse = + request.validateAndUpdateCache(ozoneManager, 2L); + + assertEquals(OzoneManagerProtocolProtos.Status.NOT_SUPPORTED_OPERATION, + omClientResponse.getOMResponse().getStatus()); + } + + @Override + protected String addKeyToTable() throws Exception { + keyName = FILE_KEY; // updated key name + + // Create parent dirs for the path + long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName, + bucketName, PARENT_DIR, omMetadataManager); + + OmKeyInfo omKeyInfo = + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, FILE_KEY, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(1L) + .build(); + OMRequestTestUtils.addFileToKeyTable(false, false, + FILE_NAME, omKeyInfo, -1, 50, omMetadataManager); + final long volumeId = omMetadataManager.getVolumeId( + omKeyInfo.getVolumeName()); + final long bucketId = omMetadataManager.getBucketId( + omKeyInfo.getVolumeName(), omKeyInfo.getBucketName()); + return omMetadataManager.getOzonePathKey( + volumeId, bucketId, omKeyInfo.getParentObjectID(), + omKeyInfo.getFileName()); + } + + @Override + public BucketLayout getBucketLayout() { + return BucketLayout.FILE_SYSTEM_OPTIMIZED; + } + + @Override + protected S3PutObjectTaggingRequest 
getPutObjectTaggingRequest(OMRequest originalRequest) { + return new S3PutObjectTaggingRequestWithFSO(originalRequest, getBucketLayout()); + } + + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3DeleteObjectTaggingResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3DeleteObjectTaggingResponse.java new file mode 100644 index 00000000000..26daacf6f28 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3DeleteObjectTaggingResponse.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.tagging; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.response.key.TestOMKeyResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNotSame; + +/** + * Test delete object tagging response. 
+ */ +public class TestS3DeleteObjectTaggingResponse extends TestOMKeyResponse { + + @Test + public void testAddToBatch() throws Exception { + OzoneManagerProtocolProtos.OMResponse omResponse = + OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteObjectTaggingResponse( + OzoneManagerProtocolProtos.DeleteObjectTaggingResponse.getDefaultInstance()) + .setStatus(OzoneManagerProtocolProtos.Status.OK) + .setCmdType(OzoneManagerProtocolProtos.Type.DeleteObjectTagging) + .build(); + + Map tags = new HashMap<>(); + tags.put("tag-key1", "tag-value1"); + tags.put("tag-key2", "tag-value2"); + + String ozoneKey = addKeyToTable(tags); + OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertNotNull(omKeyInfo); + assertEquals(2, omKeyInfo.getTags().size()); + + omKeyInfo.getTags().clear(); + + S3DeleteObjectTaggingResponse deleteObjectTaggingResponse = getDeleteObjectTaggingResponse(omKeyInfo, omResponse); + + deleteObjectTaggingResponse.addToDBBatch(omMetadataManager, batchOperation); + + // Do manual commit and see whether addToBatch is successful or not. 
+ omMetadataManager.getStore().commitBatchOperation(batchOperation); + + OmKeyInfo updatedOmKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); + assertNotSame(omKeyInfo, updatedOmKeyInfo); + assertNotNull(updatedOmKeyInfo); + assertEquals(0, updatedOmKeyInfo.getTags().size()); + } + + protected String addKeyToTable(Map tags) throws Exception { + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)) + .addAllTags(tags) + .build(); + OMRequestTestUtils.addKeyToTable(false, false, omKeyInfo, + clientID, 1L, omMetadataManager); + return omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); + } + + protected S3DeleteObjectTaggingResponse getDeleteObjectTaggingResponse(OmKeyInfo omKeyInfo, + OzoneManagerProtocolProtos.OMResponse omResponse) + throws IOException { + return new S3DeleteObjectTaggingResponse(omResponse, omKeyInfo); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3DeleteObjectTaggingResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3DeleteObjectTaggingResponseWithFSO.java new file mode 100644 index 00000000000..923ff441e98 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/tagging/TestS3DeleteObjectTaggingResponseWithFSO.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package org.apache.hadoop.ozone.om.response.s3.tagging;

import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;

import java.io.IOException;
import java.util.Map;

import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;

/**
 * Test delete object tagging response for FSO bucket.
 */
public class TestS3DeleteObjectTaggingResponseWithFSO extends TestS3DeleteObjectTaggingResponse {

  @Override
  public BucketLayout getBucketLayout() {
    return BucketLayout.FILE_SYSTEM_OPTIMIZED;
  }

  /**
   * Adds a key with the given tags to the FSO file table, directly under the
   * bucket root (empty parent path).
   * @return the ozone path key (volumeId/bucketId/parentId/fileName).
   */
  @Override
  protected String addKeyToTable(Map<String, String> tags) throws Exception {
    // Add volume, bucket and key entries to OM DB.
    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
        omMetadataManager);

    // Create parent dirs for the path
    long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName,
        bucketName, "", omMetadataManager);

    OmKeyInfo omKeyInfo =
        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE))
            .setObjectID(parentId + 1)
            .setParentObjectID(parentId)
            .setUpdateID(1L)
            .addAllTags(tags)
            .build();
    OMRequestTestUtils.addFileToKeyTable(false, false,
        keyName, omKeyInfo, -1, 50, omMetadataManager);
    return omMetadataManager.getOzonePathKey(
        omMetadataManager.getVolumeId(volumeName),
        omMetadataManager.getBucketId(volumeName, bucketName),
        omKeyInfo.getParentObjectID(), keyName);
  }

  @Override
  protected S3DeleteObjectTaggingResponse getDeleteObjectTaggingResponse(OmKeyInfo omKeyInfo,
      OzoneManagerProtocolProtos.OMResponse omResponse)
      throws IOException {
    // FSO response needs the volume/bucket ids to address the file table entry.
    return new S3DeleteObjectTaggingResponseWithFSO(omResponse, omKeyInfo,
        omMetadataManager.getVolumeId(volumeName), omBucketInfo.getObjectID());
  }

}
+ * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.tagging; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.response.key.TestOMKeyResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNotSame; + +/** + * Test put object tagging response. 
 */
public class TestS3PutObjectTaggingResponse extends TestOMKeyResponse {

  /**
   * Putting tags: a tag-less key is written to the key table, two tags are set
   * on it, and the response is committed through a batch operation. The
   * persisted key must come back carrying all the tags.
   */
  @Test
  public void testAddToDBBatch() throws Exception {
    OzoneManagerProtocolProtos.OMResponse omResponse =
        OzoneManagerProtocolProtos.OMResponse.newBuilder().setPutObjectTaggingResponse(
                OzoneManagerProtocolProtos.PutObjectTaggingResponse.getDefaultInstance())
            .setStatus(OzoneManagerProtocolProtos.Status.OK)
            .setCmdType(OzoneManagerProtocolProtos.Type.PutObjectTagging)
            .build();

    String ozoneKey = addKeyToTable();
    OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey);
    assertNotNull(omKeyInfo);
    assertEquals(0, omKeyInfo.getTags().size());

    Map<String, String> tags = new HashMap<>();
    tags.put("tag-key1", "tag-value1");
    tags.put("tag-key2", "tag-value2");

    omKeyInfo.setTags(tags);

    S3PutObjectTaggingResponse putObjectTaggingResponse = getPutObjectTaggingResponse(omKeyInfo, omResponse);

    putObjectTaggingResponse.addToDBBatch(omMetadataManager, batchOperation);

    // Do a manual commit and check whether addToDBBatch was successful or not.
    omMetadataManager.getStore().commitBatchOperation(batchOperation);

    OmKeyInfo updatedOmKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey);
    assertNotSame(omKeyInfo, updatedOmKeyInfo);
    assertNotNull(updatedOmKeyInfo);
    assertEquals(tags.size(), updatedOmKeyInfo.getTags().size());
  }

  /**
   * Adds a tag-less key to the (OBS) key table.
   * @return the ozone key of the added entry.
   */
  protected String addKeyToTable() throws Exception {
    OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucketName,
        keyName, clientID, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE),
        omMetadataManager);

    return omMetadataManager.getOzoneKey(volumeName, bucketName,
        keyName);
  }

  /** Factory hook so the FSO subclass can supply its own response type. */
  protected S3PutObjectTaggingResponse getPutObjectTaggingResponse(OmKeyInfo omKeyInfo,
      OzoneManagerProtocolProtos.OMResponse omResponse)
      throws IOException {
    return new S3PutObjectTaggingResponse(omResponse, omKeyInfo);
  }
}

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package org.apache.hadoop.ozone.om.response.s3.tagging;

import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;

import java.io.IOException;

import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;

/**
 * Test put object tagging response for FSO bucket.
 */
public class TestS3PutObjectTaggingResponseWithFSO extends TestS3PutObjectTaggingResponse {

  @Override
  public BucketLayout getBucketLayout() {
    return BucketLayout.FILE_SYSTEM_OPTIMIZED;
  }

  /**
   * Adds a tag-less key to the FSO file table, directly under the bucket root
   * (empty parent path).
   * @return the ozone path key (volumeId/bucketId/parentId/fileName).
   */
  @Override
  protected String addKeyToTable() throws Exception {
    // Add volume, bucket and key entries to OM DB.
    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
        omMetadataManager);

    // Create parent dirs for the path
    long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName,
        bucketName, "", omMetadataManager);

    OmKeyInfo omKeyInfo =
        OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE))
            .setObjectID(parentId + 1)
            .setParentObjectID(parentId)
            .setUpdateID(1L)
            .build();
    OMRequestTestUtils.addFileToKeyTable(false, false,
        keyName, omKeyInfo, -1, 50, omMetadataManager);
    return omMetadataManager.getOzonePathKey(
        omMetadataManager.getVolumeId(volumeName),
        omMetadataManager.getBucketId(volumeName, bucketName),
        omKeyInfo.getParentObjectID(), keyName);
  }

  @Override
  protected S3PutObjectTaggingResponse getPutObjectTaggingResponse(OmKeyInfo omKeyInfo, OMResponse omResponse)
      throws IOException {
    // FSO response needs the volume/bucket ids to address the file table entry.
    return new S3PutObjectTaggingResponseWithFSO(omResponse, omKeyInfo,
        omMetadataManager.getVolumeId(volumeName), omBucketInfo.getObjectID());
  }
}
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java @@ -72,6 +72,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.KB; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_TAG; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError; +import static org.apache.hadoop.ozone.s3.util.S3Consts.AWS_TAG_PREFIX; import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX; import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_KEY_LENGTH_LIMIT; @@ -363,59 +364,70 @@ protected Map getTaggingFromHeaders(HttpHeaders httpHeaders) List tagPairs = URLEncodedUtils.parse(tagString, UTF_8); - if (tagPairs.isEmpty()) { - return Collections.emptyMap(); - } + return validateAndGetTagging(tagPairs, NameValuePair::getName, NameValuePair::getValue); + } - Map tags = new HashMap<>(); - // Tag restrictions: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_S3Tag.html - for (NameValuePair tagPair: tagPairs) { - if (StringUtils.isEmpty(tagPair.getName())) { - OS3Exception ex = newError(INVALID_TAG, TAG_HEADER); - ex.setErrorMessage("Some tag keys are empty, please specify the non-empty tag keys"); + protected static Map validateAndGetTagging( + List tagList, + Function getTagKey, + Function getTagValue + ) throws OS3Exception { + final Map tags = new HashMap<>(); + for (KV tagPair : tagList) { + final String tagKey = getTagKey.apply(tagPair); + final String tagValue = getTagValue.apply(tagPair); + // Tag restrictions: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_S3Tag.html + if (StringUtils.isEmpty(tagKey)) { + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, TAG_HEADER); + ex.setErrorMessage("Some tag keys are empty, please only specify non-empty tag keys"); throw ex; } - if 
(tagPair.getValue() == null) { - // For example for query parameter with only value (e.g. "tag1") - OS3Exception ex = newError(INVALID_TAG, tagPair.getName()); - ex.setErrorMessage("Some tag values are not specified, please specify the tag values"); + if (StringUtils.startsWith(tagKey, AWS_TAG_PREFIX)) { + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, tagKey); + ex.setErrorMessage("Tag key cannot start with \"aws:\" prefix"); throw ex; } - if (tags.containsKey(tagPair.getName())) { - // Tags that are associated with an object must have unique tag keys - // Reject request if the same key is used twice on the same resource - OS3Exception ex = newError(INVALID_TAG, tagPair.getName()); - ex.setErrorMessage("There are tags with duplicate tag keys, tag keys should be unique"); + if (tagValue == null) { + // For example for query parameter with only value (e.g. "tag1") + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, tagKey); + ex.setErrorMessage("Some tag values are not specified, please specify the tag values"); throw ex; } - if (tagPair.getName().length() > TAG_KEY_LENGTH_LIMIT) { - OS3Exception ex = newError(INVALID_TAG, tagPair.getName()); + if (tagKey.length() > TAG_KEY_LENGTH_LIMIT) { + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, tagKey); ex.setErrorMessage("The tag key exceeds the maximum length of " + TAG_KEY_LENGTH_LIMIT); throw ex; } - if (tagPair.getValue().length() > TAG_VALUE_LENGTH_LIMIT) { - OS3Exception ex = newError(INVALID_TAG, tagPair.getValue()); + if (tagValue.length() > TAG_VALUE_LENGTH_LIMIT) { + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, tagValue); ex.setErrorMessage("The tag value exceeds the maximum length of " + TAG_VALUE_LENGTH_LIMIT); throw ex; } - if (!TAG_REGEX_PATTERN.matcher(tagPair.getName()).matches()) { - OS3Exception ex = newError(INVALID_TAG, tagPair.getName()); + if (!TAG_REGEX_PATTERN.matcher(tagKey).matches()) { + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, tagKey); ex.setErrorMessage("The 
tag key does not have a valid pattern"); throw ex; } - if (!TAG_REGEX_PATTERN.matcher(tagPair.getValue()).matches()) { - OS3Exception ex = newError(INVALID_TAG, tagPair.getValue()); + if (!TAG_REGEX_PATTERN.matcher(tagValue).matches()) { + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, tagValue); ex.setErrorMessage("The tag value does not have a valid pattern"); throw ex; } - tags.put(tagPair.getName(), tagPair.getValue()); + final String previous = tags.put(tagKey, tagValue); + if (previous != null) { + // Tags that are associated with an object must have unique tag keys + // Reject request if the same key is used twice on the same resource + OS3Exception ex = S3ErrorTable.newError(INVALID_TAG, tagKey); + ex.setErrorMessage("There are tags with duplicate tag keys, tag keys should be unique"); + throw ex; + } } if (tags.size() > TAG_NUM_LIMIT) { @@ -426,7 +438,7 @@ protected Map getTaggingFromHeaders(HttpHeaders httpHeaders) throw ex; } - return tags; + return Collections.unmodifiableMap(tags); } private AuditMessage.Builder auditMessageBaseBuilder(AuditAction op, diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 9dbc7b9aaba..21ad28254d7 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -68,6 +68,7 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; import org.apache.hadoop.ozone.s3.HeaderPreprocessor; import org.apache.hadoop.ozone.s3.SignedChunksInputStream; +import org.apache.hadoop.ozone.s3.endpoint.S3Tagging.Tag; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; import org.apache.hadoop.ozone.s3.util.RFC1123Util; @@ -212,7 +213,7 @@ public void init() { * 
See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html for * more details. */ - @SuppressWarnings("checkstyle:MethodLength") + @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"}) @PUT public Response put( @PathParam("bucket") String bucketName, @@ -220,6 +221,7 @@ public Response put( @HeaderParam("Content-Length") long length, @QueryParam("partNumber") int partNumber, @QueryParam("uploadId") @DefaultValue("") String uploadID, + @QueryParam("tagging") String taggingMarker, final InputStream body) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.CREATE_KEY; @@ -230,6 +232,11 @@ public Response put( DigestInputStream digestInputStream = null; try { OzoneVolume volume = getVolume(); + if (taggingMarker != null) { + s3GAction = S3GAction.PUT_OBJECT_TAGGING; + return putObjectTagging(volume, bucketName, keyPath, body); + } + if (uploadID != null && !uploadID.equals("")) { if (headers.getHeaderString(COPY_SOURCE_HEADER) == null) { s3GAction = S3GAction.CREATE_MULTIPART_KEY; @@ -336,7 +343,9 @@ public Response put( } catch (OMException ex) { auditSuccess = false; auditWriteFailure(s3GAction, ex); - if (copyHeader != null) { + if (taggingMarker != null) { + getMetrics().updatePutObjectTaggingFailureStats(startNanos); + } else if (copyHeader != null) { getMetrics().updateCopyObjectFailureStats(startNanos); } else { getMetrics().updateCreateKeyFailureStats(startNanos); @@ -360,7 +369,9 @@ public Response put( } catch (Exception ex) { auditSuccess = false; auditWriteFailure(s3GAction, ex); - if (copyHeader != null) { + if (taggingMarker != null) { + getMetrics().updatePutObjectTaggingFailureStats(startNanos); + } else if (copyHeader != null) { getMetrics().updateCopyObjectFailureStats(startNanos); } else { getMetrics().updateCreateKeyFailureStats(startNanos); @@ -390,7 +401,7 @@ public Response put( * https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadListParts.html * 
for more details. */ - @SuppressWarnings("checkstyle:MethodLength") + @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"}) @GET public Response get( @PathParam("bucket") String bucketName, @@ -398,12 +409,18 @@ public Response get( @QueryParam("partNumber") int partNumber, @QueryParam("uploadId") String uploadId, @QueryParam("max-parts") @DefaultValue("1000") int maxParts, - @QueryParam("part-number-marker") String partNumberMarker) + @QueryParam("part-number-marker") String partNumberMarker, + @QueryParam("tagging") String taggingMarker) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.GET_KEY; PerformanceStringBuilder perf = new PerformanceStringBuilder(); try { + if (taggingMarker != null) { + s3GAction = S3GAction.GET_OBJECT_TAGGING; + return getObjectTagging(bucketName, keyPath); + } + if (uploadId != null) { // When we have uploadId, this is the request for list Parts. s3GAction = S3GAction.LIST_PARTS; @@ -532,7 +549,9 @@ public Response get( AUDIT.logReadFailure( buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex) ); - if (uploadId != null) { + if (taggingMarker != null) { + getMetrics().updateGetObjectTaggingFailureStats(startNanos); + } else if (uploadId != null) { getMetrics().updateListPartsFailureStats(startNanos); } else { getMetrics().updateGetKeyFailureStats(startNanos); @@ -699,13 +718,19 @@ private Response abortMultipartUpload(OzoneVolume volume, String bucket, public Response delete( @PathParam("bucket") String bucketName, @PathParam("path") String keyPath, - @QueryParam("uploadId") @DefaultValue("") String uploadId) throws + @QueryParam("uploadId") @DefaultValue("") String uploadId, + @QueryParam("tagging") String taggingMarker) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.DELETE_KEY; try { OzoneVolume volume = getVolume(); + if (taggingMarker != null) { + s3GAction = 
S3GAction.DELETE_OBJECT_TAGGING; + return deleteObjectTagging(volume, bucketName, keyPath); + } + if (uploadId != null && !uploadId.equals("")) { s3GAction = S3GAction.ABORT_MULTIPART_UPLOAD; return abortMultipartUpload(volume, bucketName, keyPath, uploadId); @@ -732,13 +757,18 @@ public Response delete( // keys. Just return 204 } else if (isAccessDenied(ex)) { throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex); + } else if (ex.getResult() == ResultCodes.NOT_SUPPORTED_OPERATION) { + // When deleteObjectTagging operation is applied on FSO directory + throw S3ErrorTable.newError(S3ErrorTable.NOT_IMPLEMENTED, keyPath); } else { throw ex; } } catch (Exception ex) { AUDIT.logWriteFailure( buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex)); - if (uploadId != null && !uploadId.equals("")) { + if (taggingMarker != null) { + getMetrics().updateDeleteObjectTaggingFailureStats(startNanos); + } else if (uploadId != null && !uploadId.equals("")) { getMetrics().updateAbortMultipartUploadFailureStats(startNanos); } else { getMetrics().updateDeleteKeyFailureStats(startNanos); @@ -1381,6 +1411,75 @@ public static boolean checkCopySourceModificationTime( (lastModificationTime <= copySourceIfUnmodifiedSince); } + private Response putObjectTagging(OzoneVolume volume, String bucketName, String keyName, InputStream body) + throws IOException, OS3Exception { + long startNanos = Time.monotonicNowNanos(); + S3Tagging tagging = null; + try { + tagging = new PutTaggingUnmarshaller().readFrom(body); + tagging.validate(); + } catch (Exception ex) { + OS3Exception exception = S3ErrorTable.newError(S3ErrorTable.MALFORMED_XML, keyName); + exception.setErrorMessage(exception.getErrorMessage() + ". 
" + ex.getMessage()); + throw exception; + } + + Map tags = validateAndGetTagging( + tagging.getTagSet().getTags(), // Nullity check was done in previous parsing step + Tag::getKey, + Tag::getValue + ); + + try { + volume.getBucket(bucketName).putObjectTagging(keyName, tags); + } catch (OMException ex) { + if (ex.getResult() == ResultCodes.INVALID_REQUEST) { + throw S3ErrorTable.newError(S3ErrorTable.INVALID_REQUEST, keyName); + } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) { + throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, keyName); + } else if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) { + throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_KEY, keyName); + } else if (ex.getResult() == ResultCodes.NOT_SUPPORTED_OPERATION) { + // When putObjectTagging operation is applied on FSO directory + throw S3ErrorTable.newError(S3ErrorTable.NOT_IMPLEMENTED, keyName); + } + throw ex; + } + getMetrics().updatePutObjectTaggingSuccessStats(startNanos); + return Response.ok().build(); + } + + private Response getObjectTagging(String bucketName, String keyName) throws IOException { + long startNanos = Time.monotonicNowNanos(); + + OzoneVolume volume = getVolume(); + + Map tagMap = volume.getBucket(bucketName).getObjectTagging(keyName); + + getMetrics().updateGetObjectTaggingSuccessStats(startNanos); + return Response.ok(S3Tagging.fromMap(tagMap), MediaType.APPLICATION_XML_TYPE).build(); + } + + private Response deleteObjectTagging(OzoneVolume volume, String bucketName, String keyName) + throws IOException, OS3Exception { + long startNanos = Time.monotonicNowNanos(); + + try { + volume.getBucket(bucketName).deleteObjectTagging(keyName); + } catch (OMException ex) { + // Unlike normal key deletion that ignores the key not found exception + // DeleteObjectTagging should throw the exception if the key does not exist + if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) { + throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_KEY, keyName); + } + throw ex; + } + + 
getMetrics().updateDeleteObjectTaggingSuccessStats(startNanos); + return Response.noContent().build(); + } + + @VisibleForTesting public void setOzoneConfiguration(OzoneConfiguration config) { this.ozoneConfiguration = config; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutTaggingUnmarshaller.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutTaggingUnmarshaller.java new file mode 100644 index 00000000000..eb5c8a14874 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PutTaggingUnmarshaller.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import org.xml.sax.InputSource; +import org.xml.sax.XMLReader; + +import javax.ws.rs.WebApplicationException; +import javax.xml.XMLConstants; +import javax.xml.bind.JAXBContext; +import javax.xml.bind.UnmarshallerHandler; +import javax.xml.parsers.SAXParserFactory; +import java.io.InputStream; + +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_REQUEST; +import static org.apache.hadoop.ozone.s3.util.S3Consts.S3_XML_NAMESPACE; +import static org.apache.hadoop.ozone.s3.util.S3Utils.wrapOS3Exception; + +/** + * Custom unmarshaller to read Tagging request body. + */ +public class PutTaggingUnmarshaller { + + private JAXBContext context; + private SAXParserFactory saxParserFactory; + + public PutTaggingUnmarshaller() { + try { + context = JAXBContext.newInstance(S3Tagging.class); + saxParserFactory = SAXParserFactory.newInstance(); + saxParserFactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true); + } catch (Exception ex) { + throw new AssertionError("Can not instantiate " + + "PutTaggingUnmarshaller parser", ex); + } + } + + public S3Tagging readFrom(InputStream inputStream) + throws WebApplicationException { + try { + XMLReader xmlReader = saxParserFactory.newSAXParser().getXMLReader(); + UnmarshallerHandler unmarshallerHandler = + context.createUnmarshaller().getUnmarshallerHandler(); + XmlNamespaceFilter filter = + new XmlNamespaceFilter(S3_XML_NAMESPACE); + filter.setContentHandler(unmarshallerHandler); + filter.setParent(xmlReader); + filter.parse(new InputSource(inputStream)); + return (S3Tagging) unmarshallerHandler.getResult(); + } catch (Exception e) { 
+ throw wrapOS3Exception(INVALID_REQUEST.withMessage(e.getMessage())); + } + } + +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Tagging.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Tagging.java new file mode 100644 index 00000000000..0a0f289f1d8 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Tagging.java @@ -0,0 +1,155 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * S3 tagging. + */ +@XmlAccessorType(XmlAccessType.FIELD) +@XmlRootElement(name = "Tagging", + namespace = "http://s3.amazonaws.com/doc/2006-03-01/") +public class S3Tagging { + + @XmlElement(name = "TagSet") + private TagSet tagSet; + + public S3Tagging() { + + } + + public S3Tagging(TagSet tagSet) { + this.tagSet = tagSet; + } + + public TagSet getTagSet() { + return tagSet; + } + + public void setTagSet(TagSet tagSet) { + this.tagSet = tagSet; + } + + /** + * Entity for child element TagSet. + */ + @XmlAccessorType(XmlAccessType.FIELD) + @XmlRootElement(name = "TagSet") + public static class TagSet { + @XmlElement(name = "Tag") + private List tags = new ArrayList<>(); + + public TagSet() { + } + + public TagSet(List tags) { + this.tags = tags; + } + + public List getTags() { + return tags; + } + + public void setTags(List tags) { + this.tags = tags; + } + } + + /** + * Entity for child element Tag. 
+ */ + @XmlAccessorType(XmlAccessType.FIELD) + @XmlRootElement(name = "Tag") + public static class Tag { + @XmlElement(name = "Key") + private String key; + + @XmlElement(name = "Value") + private String value; + + public Tag() { + } + + public Tag(String key, String value) { + this.key = key; + this.value = value; + } + + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } + + public String getValue() { + return value; + } + + public void setValue(String value) { + this.value = value; + } + } + + /** + * Creates an S3 tagging instance (XML representation) from a Map retrieved + * from OM. + * @param tagMap Map representing the tags. + * @return {@link S3Tagging} + */ + public static S3Tagging fromMap(Map tagMap) { + List tags = tagMap.entrySet() + .stream() + .map( + tagEntry -> new Tag(tagEntry.getKey(), tagEntry.getValue()) + ) + .collect(Collectors.toList()); + return new S3Tagging(new TagSet(tags)); + } + + /** + * Additional XML validation logic for S3 tagging. 
+ */ + public void validate() { + if (tagSet == null) { + throw new IllegalArgumentException("TagSet needs to be specified"); + } + + if (tagSet.getTags().isEmpty()) { + throw new IllegalArgumentException("Tags need to be specified and cannot be empty"); + } + + for (Tag tag: tagSet.getTags()) { + if (tag.getKey() == null) { + throw new IllegalArgumentException("Some tag keys are not specified"); + } + if (tag.getValue() == null) { + throw new IllegalArgumentException("Tag value for tag " + tag.getKey() + " is not specified"); + } + } + } +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java index 42c044086b8..49761f89a3a 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java @@ -145,6 +145,10 @@ private S3ErrorTable() { public static final OS3Exception NO_SUCH_TAG_SET = new OS3Exception( "NoSuchTagSet", "The specified tag does not exist.", HTTP_NOT_FOUND); + public static final OS3Exception MALFORMED_XML = new OS3Exception( + "MalformedXML", "The XML you provided was not well-formed or did not " + + "validate against our published schema", HTTP_BAD_REQUEST); + public static OS3Exception newError(OS3Exception e, String resource) { return newError(e, resource, null); } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/metrics/S3GatewayMetrics.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/metrics/S3GatewayMetrics.java index ea34be638eb..c13bad5d662 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/metrics/S3GatewayMetrics.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/metrics/S3GatewayMetrics.java @@ -95,6 +95,12 @@ public final class S3GatewayMetrics implements 
Closeable, MetricsSource { private @Metric MutableCounterLong copyObjectSuccessLength; private @Metric MutableCounterLong putKeySuccessLength; private @Metric MutableCounterLong getKeySuccessLength; + private @Metric MutableCounterLong getObjectTaggingSuccess; + private @Metric MutableCounterLong getObjectTaggingFailure; + private @Metric MutableCounterLong putObjectTaggingSuccess; + private @Metric MutableCounterLong putObjectTaggingFailure; + private @Metric MutableCounterLong deleteObjectTaggingSuccess; + private @Metric MutableCounterLong deleteObjectTaggingFailure; // S3 Gateway Latency Metrics // BucketEndpoint @@ -246,6 +252,24 @@ public final class S3GatewayMetrics implements Closeable, MetricsSource { @Metric(about = "Latency for copy metadata of an key in nanoseconds") private PerformanceMetrics copyKeyMetadataLatencyNs; + @Metric(about = "Latency for successful get object tagging of a key in nanoseconds") + private PerformanceMetrics getObjectTaggingSuccessLatencyNs; + + @Metric(about = "Latency for failing to get object tagging of a key in nanoseconds") + private PerformanceMetrics getObjectTaggingFailureLatencyNs; + + @Metric(about = "Latency for successful put object tagging of a key in nanoseconds") + private PerformanceMetrics putObjectTaggingSuccessLatencyNs; + + @Metric(about = "Latency for failing to put object tagging of a key in nanoseconds") + private PerformanceMetrics putObjectTaggingFailureLatencyNs; + + @Metric(about = "Latency for successful delete object tagging of a key in nanoseconds") + private PerformanceMetrics deleteObjectTaggingSuccessLatencyNs; + + @Metric(about = "Latency for failing to delete object tagging of a key in nanoseconds") + private PerformanceMetrics deleteObjectTaggingFailureLatencyNs; + private final Map performanceMetrics; /** @@ -375,6 +399,18 @@ public void getMetrics(MetricsCollector collector, boolean all) { putKeySuccessLength.snapshot(recordBuilder, true); getKeySuccessLength.snapshot(recordBuilder, true); 
listKeyCount.snapshot(recordBuilder, true); + getObjectTaggingSuccess.snapshot(recordBuilder, true); + getObjectTaggingSuccessLatencyNs.snapshot(recordBuilder, true); + getObjectTaggingFailure.snapshot(recordBuilder, true); + getObjectTaggingFailureLatencyNs.snapshot(recordBuilder, true); + putObjectTaggingSuccess.snapshot(recordBuilder, true); + putObjectTaggingSuccessLatencyNs.snapshot(recordBuilder, true); + putObjectTaggingFailure.snapshot(recordBuilder, true); + putObjectTaggingFailureLatencyNs.snapshot(recordBuilder, true); + deleteObjectTaggingSuccess.snapshot(recordBuilder, true); + deleteObjectTaggingSuccessLatencyNs.snapshot(recordBuilder, true); + deleteObjectTaggingFailure.snapshot(recordBuilder, true); + deleteObjectTaggingFailureLatencyNs.snapshot(recordBuilder, true); } // INC and UPDATE @@ -596,6 +632,36 @@ public void incGetKeySuccessLength(long bytes) { getKeySuccessLength.incr(bytes); } + public void updateGetObjectTaggingSuccessStats(long startNanos) { + this.getObjectTaggingSuccess.incr(); + this.getObjectTaggingSuccessLatencyNs.add(Time.monotonicNowNanos() - startNanos); + } + + public void updateGetObjectTaggingFailureStats(long startNanos) { + this.getObjectTaggingFailure.incr(); + this.getObjectTaggingFailureLatencyNs.add(Time.monotonicNowNanos() - startNanos); + } + + public void updatePutObjectTaggingSuccessStats(long startNanos) { + this.putObjectTaggingSuccess.incr(); + this.putObjectTaggingSuccessLatencyNs.add(Time.monotonicNowNanos() - startNanos); + } + + public void updatePutObjectTaggingFailureStats(long startNanos) { + this.putObjectTaggingFailure.incr(); + this.putObjectTaggingFailureLatencyNs.add(Time.monotonicNowNanos() - startNanos); + } + + public void updateDeleteObjectTaggingSuccessStats(long startNanos) { + this.deleteObjectTaggingSuccess.incr(); + this.deleteObjectTaggingSuccessLatencyNs.add(Time.monotonicNowNanos() - startNanos); + } + + public void updateDeleteObjectTaggingFailureStats(long startNanos) { + 
this.deleteObjectTaggingFailure.incr(); + this.deleteObjectTaggingFailureLatencyNs.add(Time.monotonicNowNanos() - startNanos); + } + // GET public long getListS3BucketsSuccess() { return listS3BucketsSuccess.value(); @@ -737,6 +803,30 @@ public long getListS3BucketsFailure() { return listS3BucketsFailure.value(); } + public long getGetObjectTaggingSuccess() { + return getObjectTaggingSuccess.value(); + } + + public long getGetObjectTaggingFailure() { + return getObjectTaggingFailure.value(); + } + + public long getPutObjectTaggingSuccess() { + return putObjectTaggingSuccess.value(); + } + + public long getPutObjectTaggingFailure() { + return putObjectTaggingFailure.value(); + } + + public long getDeleteObjectTaggingSuccess() { + return deleteObjectTaggingSuccess.value(); + } + + public long getDeleteObjectTaggingFailure() { + return deleteObjectTaggingFailure.value(); + } + private long updateAndGetStats(PerformanceMetrics metric, long startNanos) { long value = Time.monotonicNowNanos() - startNanos; metric.add(value); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java index cbdbef0e0a1..7b82d5c2a70 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java @@ -74,6 +74,7 @@ private S3Consts() { public static final String TAG_HEADER = "x-amz-tagging"; public static final String TAG_DIRECTIVE_HEADER = "x-amz-tagging-directive"; public static final String TAG_COUNT_HEADER = "x-amz-tagging-count"; + public static final String AWS_TAG_PREFIX = "aws:"; public static final int TAG_NUM_LIMIT = 10; public static final int TAG_KEY_LENGTH_LIMIT = 128; diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java index e3e3537b1c3..41584c9786d 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java @@ -771,4 +771,20 @@ public void recoverKey(OmKeyArgs args, long clientID) throws IOException { } + @Override + public Map getObjectTagging(String volumeName, String bucketName, String keyName) throws IOException { + return getBucket(volumeName, bucketName).getObjectTagging(keyName); + } + + @Override + public void putObjectTagging(String volumeName, String bucketName, String keyName, Map tags) + throws IOException { + getBucket(volumeName, bucketName).putObjectTagging(keyName, tags); + } + + @Override + public void deleteObjectTagging(String volumeName, String bucketName, String keyName) throws IOException { + getBucket(volumeName, bucketName).deleteObjectTagging(keyName); + } + } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java index 06b6a8efb71..21f2414c0a7 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java @@ -596,6 +596,37 @@ public boolean setAcl(List acls) throws IOException { return aclList.addAll(acls); } + @Override + public Map getObjectTagging(String keyName) throws IOException { + if (keyDetails.containsKey(keyName)) { + OzoneKeyDetails ozoneKeyDetails = keyDetails.get(keyName); + return ozoneKeyDetails.getTags(); + } else { + throw new OMException(ResultCodes.KEY_NOT_FOUND); + } + } + + @Override + public void putObjectTagging(String keyName, Map tags) throws IOException { + if (keyDetails.containsKey(keyName)) { + OzoneKeyDetails 
ozoneKeyDetails = keyDetails.get(keyName); + ozoneKeyDetails.getTags().clear(); + ozoneKeyDetails.getTags().putAll(tags); + } else { + throw new OMException(ResultCodes.KEY_NOT_FOUND); + } + } + + @Override + public void deleteObjectTagging(String keyName) throws IOException { + if (keyDetails.containsKey(keyName)) { + OzoneKeyDetails ozoneKeyDetails = keyDetails.get(keyName); + ozoneKeyDetails.getTags().clear(); + } else { + throw new OMException(ResultCodes.KEY_NOT_FOUND); + } + } + /** * Class used to hold part information in a upload part request. */ diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java index 9c107bdb5b1..1356b50ad35 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java @@ -69,13 +69,13 @@ public void testAbortMultipartUpload() throws Exception { // Abort multipart upload - response = rest.delete(bucket, key, uploadID); + response = rest.delete(bucket, key, uploadID, null); assertEquals(204, response.getStatus()); // test with unknown upload Id. 
try { - rest.delete(bucket, key, "random"); + rest.delete(bucket, key, "random", null); } catch (OS3Exception ex) { assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), ex.getCode()); assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(), diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java index 677367e6d81..bf4768e0810 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java @@ -77,17 +77,17 @@ public static void setUp() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, body); + content.length(), 1, uploadID, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 2, uploadID, body); + content.length(), 2, uploadID, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 3, uploadID, body); + content.length(), 3, uploadID, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); } @@ -95,7 +95,7 @@ public static void setUp() throws Exception { @Test public void testListParts() throws Exception { Response response = REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, - uploadID, 3, "0"); + uploadID, 3, "0", null); ListPartsResponse listPartsResponse = (ListPartsResponse) response.getEntity(); @@ -108,7 +108,7 @@ public void testListParts() throws Exception { @Test public void testListPartsContinuation() throws Exception { Response response = REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, - uploadID, 2, "0"); + uploadID, 2, "0", 
null); ListPartsResponse listPartsResponse = (ListPartsResponse) response.getEntity(); @@ -117,7 +117,7 @@ public void testListPartsContinuation() throws Exception { // Continue response = REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, uploadID, 2, - Integer.toString(listPartsResponse.getNextPartNumberMarker())); + Integer.toString(listPartsResponse.getNextPartNumberMarker()), null); listPartsResponse = (ListPartsResponse) response.getEntity(); assertFalse(listPartsResponse.getTruncated()); @@ -129,7 +129,7 @@ public void testListPartsContinuation() throws Exception { public void testListPartsWithUnknownUploadID() throws Exception { try { REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, - uploadID, 2, "0"); + uploadID, 2, "0", null); } catch (OS3Exception ex) { assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(), ex.getErrorMessage()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java index b23dbfb9c05..57529cc9d8c 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java @@ -109,7 +109,7 @@ private Part uploadPart(String key, String uploadID, int partNumber, String ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); Response response = REST.put(OzoneConsts.S3_BUCKET, key, content.length(), - partNumber, uploadID, body); + partNumber, uploadID, null, body); assertEquals(200, response.getStatus()); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); Part part = new Part(); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java index d9595aeff79..8caf55dd829 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java @@ -330,7 +330,7 @@ private Part uploadPart(String key, String uploadID, int partNumber, String ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); Response response = REST.put(OzoneConsts.S3_BUCKET, key, content.length(), - partNumber, uploadID, body); + partNumber, uploadID, null, body); assertEquals(200, response.getStatus()); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); Part part = new Part(); @@ -375,7 +375,7 @@ private Part uploadPartWithCopy(String key, String uploadID, int partNumber, ByteArrayInputStream body = new ByteArrayInputStream("".getBytes(UTF_8)); Response response = REST.put(OzoneConsts.S3_BUCKET, key, 0, partNumber, - uploadID, body); + uploadID, null, body); assertEquals(200, response.getStatus()); CopyPartResult result = (CopyPartResult) response.getEntity(); @@ -402,7 +402,7 @@ public void testUploadWithRangeCopyContentLength() OzoneConsts.S3_BUCKET + "/" + EXISTING_KEY); additionalHeaders.put(COPY_SOURCE_HEADER_RANGE, "bytes=0-3"); setHeaders(additionalHeaders); - REST.put(OzoneConsts.S3_BUCKET, KEY, 0, 1, uploadID, body); + REST.put(OzoneConsts.S3_BUCKET, KEY, 0, 1, uploadID, null, body); OzoneMultipartUploadPartListParts parts = CLIENT.getObjectStore().getS3Bucket(OzoneConsts.S3_BUCKET) .listParts(KEY, uploadID, 0, 100); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java index 90695f03ff9..340ed1984ec 100644 --- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java @@ -51,7 +51,7 @@ public void delete() throws IOException, OS3Exception { rest.setOzoneConfiguration(new OzoneConfiguration()); //WHEN - rest.delete("b1", "key1", null); + rest.delete("b1", "key1", null, null); //THEN assertFalse(bucket.listKeys("").hasNext(), diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java index 8cf8da95cf8..cc7f43b2863 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java @@ -95,11 +95,11 @@ public void init() throws OS3Exception, IOException { ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); rest.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, body); + 1, null, null, body); // Create a key with object tags when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(), - 1, null, body); + 1, null, null, body); context = mock(ContainerRequestContext.class); when(context.getUriInfo()).thenReturn(mock(UriInfo.class)); @@ -111,7 +111,7 @@ public void init() throws OS3Exception, IOException { @Test public void get() throws IOException, OS3Exception { //WHEN - Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); + Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); //THEN OzoneInputStream ozoneInputStream = @@ -133,7 +133,7 @@ public void get() throws IOException, OS3Exception { @Test public void getKeyWithTag() throws IOException, OS3Exception { //WHEN - Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, 
null, 0, null); + Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, null, 0, null, null); //THEN OzoneInputStream ozoneInputStream = @@ -155,7 +155,7 @@ public void getKeyWithTag() throws IOException, OS3Exception { public void inheritRequestHeader() throws IOException, OS3Exception { setDefaultHeader(); - Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); + Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); assertEquals(CONTENT_TYPE1, response.getHeaderString("Content-Type")); @@ -188,7 +188,7 @@ public void overrideResponseHeader() throws IOException, OS3Exception { when(context.getUriInfo().getQueryParameters()) .thenReturn(queryParameter); - Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); + Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); assertEquals(CONTENT_TYPE2, response.getHeaderString("Content-Type")); @@ -209,13 +209,13 @@ public void getRangeHeader() throws IOException, OS3Exception { Response response; when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-0"); - response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); assertEquals("1", response.getHeaderString("Content-Length")); assertEquals(String.format("bytes 0-0/%s", CONTENT.length()), response.getHeaderString("Content-Range")); when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-"); - response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); assertEquals(String.valueOf(CONTENT.length()), response.getHeaderString("Content-Length")); assertEquals( @@ -228,7 +228,7 @@ public void getRangeHeader() throws IOException, OS3Exception { @Test public void getStatusCode() throws IOException, OS3Exception { Response response; - response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, 
null, 0, null, null); assertEquals(response.getStatus(), Response.Status.OK.getStatusCode()); @@ -236,7 +236,7 @@ public void getStatusCode() throws IOException, OS3Exception { // The 206 (Partial Content) status code indicates that the server is // successfully fulfilling a range request for the target resource when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-1"); - response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); assertEquals(response.getStatus(), Response.Status.PARTIAL_CONTENT.getStatusCode()); assertNull(response.getHeaderString(TAG_COUNT_HEADER)); @@ -270,7 +270,7 @@ public void testGetWhenKeyIsDirectoryAndDoesNotEndWithASlash() // WHEN final OS3Exception ex = assertThrows(OS3Exception.class, - () -> rest.get(BUCKET_NAME, keyPath, 0, null, 0, null)); + () -> rest.get(BUCKET_NAME, keyPath, 0, null, 0, null, null)); // THEN assertEquals(NO_SUCH_KEY.getCode(), ex.getCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java index 8b3d9e1ad2a..4049e03b438 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java @@ -157,7 +157,7 @@ void testPutObject(int length, ReplicationConfig replication) throws IOException bucket.setReplicationConfig(replication); //WHEN - Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, length, 1, null, body); + Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, length, 1, null, null, body); //THEN assertEquals(200, response.getStatus()); @@ -184,7 +184,7 @@ void testPutObjectContentLength() throws IOException, OS3Exception { new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); long dataSize = CONTENT.length(); - 
objectEndpoint.put(BUCKET_NAME, KEY_NAME, dataSize, 0, null, body); + objectEndpoint.put(BUCKET_NAME, KEY_NAME, dataSize, 0, null, null, body); assertEquals(dataSize, getKeyDataSize()); } @@ -201,7 +201,7 @@ void testPutObjectContentLengthForStreaming() when(headers.getHeaderString(DECODED_CONTENT_LENGTH_HEADER)) .thenReturn("15"); - objectEndpoint.put(BUCKET_NAME, KEY_NAME, chunkedContent.length(), 0, null, + objectEndpoint.put(BUCKET_NAME, KEY_NAME, chunkedContent.length(), 0, null, null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); assertEquals(15, getKeyDataSize()); } @@ -216,7 +216,7 @@ public void testPutObjectWithTags() throws IOException, OS3Exception { objectEndpoint.setHeaders(headersWithTags); Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, body); + 1, null, null, body); assertEquals(200, response.getStatus()); @@ -239,7 +239,7 @@ public void testPutObjectWithOnlyTagKey() throws Exception { try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, body); + 1, null, null, body); fail("request with invalid query param should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -257,7 +257,7 @@ public void testPutObjectWithDuplicateTagKey() throws Exception { objectEndpoint.setHeaders(headersWithDuplicateTagKey); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, body); + 1, null, null, body); fail("request with duplicate tag key should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -276,7 +276,7 @@ public void testPutObjectWithLongTagKey() throws Exception { objectEndpoint.setHeaders(headersWithLongTagKey); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, body); + 1, null, null, body); fail("request with tag key exceeding the length limit should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -295,7 +295,7 @@ 
public void testPutObjectWithLongTagValue() throws Exception { when(headersWithLongTagValue.getHeaderString(TAG_HEADER)).thenReturn("tag1=" + longTagValue); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, body); + 1, null, null, body); fail("request with tag value exceeding the length limit should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -320,7 +320,7 @@ public void testPutObjectWithTooManyTags() throws Exception { objectEndpoint.setHeaders(headersWithTooManyTags); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, body); + 1, null, null, body); fail("request with number of tags exceeding limit should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -349,7 +349,7 @@ void testPutObjectWithSignedChunks() throws IOException, OS3Exception { //WHEN Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, - chunkedContent.length(), 1, null, + chunkedContent.length(), 1, null, null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); //THEN @@ -378,7 +378,7 @@ public void testPutObjectMessageDigestResetDuringException() throws OS3Exception new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT - .length(), 1, null, body); + .length(), 1, null, null, body); fail("Should throw IOException"); } catch (IOException ignored) { // Verify that the message digest is reset so that the instance can be reused for the @@ -403,7 +403,7 @@ void testCopyObject() throws IOException, OS3Exception { when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("COPY"); Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, - CONTENT.length(), 1, null, body); + CONTENT.length(), 1, null, null, body); OzoneInputStream ozoneInputStream = clientStub.getObjectStore() .getS3Bucket(BUCKET_NAME) @@ -429,7 +429,7 @@ void testCopyObject() throws IOException, OS3Exception 
{ BUCKET_NAME + "/" + urlEncode(KEY_NAME)); response = objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, - null, body); + null, null, body); // Check destination key and response ozoneInputStream = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME) @@ -459,7 +459,7 @@ void testCopyObject() throws IOException, OS3Exception { metadataHeaders.remove(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-2"); response = objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, - null, body); + null, null, body); ozoneInputStream = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME) .readKey(DEST_KEY); @@ -486,7 +486,7 @@ void testCopyObject() throws IOException, OS3Exception { // wrong copy metadata directive when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("INVALID"); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, null, body), + DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, null, null, body), "test copy object failed"); assertThat(e.getHttpCode()).isEqualTo(400); assertThat(e.getCode()).isEqualTo("InvalidArgument"); @@ -496,7 +496,7 @@ void testCopyObject() throws IOException, OS3Exception { // source and dest same e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, body), + BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, null, body), "test copy object failed"); assertThat(e.getErrorMessage()).contains("This copy request is illegal"); @@ -504,28 +504,28 @@ void testCopyObject() throws IOException, OS3Exception { when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( NO_SUCH_BUCKET + "/" + urlEncode(KEY_NAME)); e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(DEST_BUCKET_NAME, - DEST_KEY, CONTENT.length(), 1, null, body), "test copy object failed"); + DEST_KEY, CONTENT.length(), 1, null, null, body), "test copy object failed"); 
assertThat(e.getCode()).contains("NoSuchBucket"); // dest bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( BUCKET_NAME + "/" + urlEncode(KEY_NAME)); e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(NO_SUCH_BUCKET, - DEST_KEY, CONTENT.length(), 1, null, body), "test copy object failed"); + DEST_KEY, CONTENT.length(), 1, null, null, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); //Both source and dest bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( NO_SUCH_BUCKET + "/" + urlEncode(KEY_NAME)); e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(NO_SUCH_BUCKET, - DEST_KEY, CONTENT.length(), 1, null, body), "test copy object failed"); + DEST_KEY, CONTENT.length(), 1, null, null, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); // source key not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( BUCKET_NAME + "/" + urlEncode(NO_SUCH_BUCKET)); e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - "nonexistent", KEY_NAME, CONTENT.length(), 1, null, body), + "nonexistent", KEY_NAME, CONTENT.length(), 1, null, null, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); } @@ -537,7 +537,7 @@ public void testCopyObjectMessageDigestResetDuringException() throws IOException new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, - CONTENT.length(), 1, null, body); + CONTENT.length(), 1, null, null, body); OzoneInputStream ozoneInputStream = clientStub.getObjectStore() .getS3Bucket(BUCKET_NAME) @@ -564,7 +564,7 @@ public void testCopyObjectMessageDigestResetDuringException() throws IOException try { objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, - null, body); + null, null, body); fail("Should throw IOException"); } catch (IOException ignored) { // Verify that the message digest is reset so 
that the instance can be reused for the @@ -586,7 +586,7 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception { String sourceKeyName = "sourceKey"; Response putResponse = objectEndpoint.put(BUCKET_NAME, sourceKeyName, - CONTENT.length(), 1, null, body); + CONTENT.length(), 1, null, null, body); OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(sourceKeyName); @@ -603,7 +603,7 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception { BUCKET_NAME + "/" + urlEncode(sourceKeyName)); objectEndpoint.setHeaders(headersForCopy); - Response copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, body); + Response copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, null, body); OzoneKeyDetails destKeyDetails = clientStub.getObjectStore() .getS3Bucket(DEST_BUCKET_NAME).getKey(destKey); @@ -622,7 +622,7 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception { // With x-amz-tagging-directive = COPY with a different x-amz-tagging when(headersForCopy.getHeaderString(TAG_HEADER)).thenReturn("tag3=value3"); - copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, body); + copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, null, body); assertEquals(200, copyResponse.getStatus()); destKeyDetails = clientStub.getObjectStore() @@ -637,7 +637,7 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception { // Copy object with x-amz-tagging-directive = REPLACE when(headersForCopy.getHeaderString(TAG_DIRECTIVE_HEADER)).thenReturn("REPLACE"); - copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, body); + copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, null, body); assertEquals(200, copyResponse.getStatus()); destKeyDetails = clientStub.getObjectStore() @@ -659,7 +659,7 @@ 
public void testCopyObjectWithInvalidTagCopyDirective() throws Exception { HttpHeaders headersForCopy = Mockito.mock(HttpHeaders.class); when(headersForCopy.getHeaderString(TAG_DIRECTIVE_HEADER)).thenReturn("INVALID"); try { - objectEndpoint.put(DEST_BUCKET_NAME, "somekey", CONTENT.length(), 1, null, body); + objectEndpoint.put(DEST_BUCKET_NAME, "somekey", CONTENT.length(), 1, null, null, body); } catch (OS3Exception ex) { assertEquals(INVALID_ARGUMENT.getCode(), ex.getCode()); assertThat(ex.getErrorMessage()).contains("The tagging copy directive specified is invalid"); @@ -674,7 +674,7 @@ void testInvalidStorageType() { when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("random"); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, body)); + BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, null, body)); assertEquals(S3ErrorTable.INVALID_ARGUMENT.getErrorMessage(), e.getErrorMessage()); assertEquals("random", e.getResource()); @@ -687,7 +687,7 @@ void testEmptyStorageType() throws IOException, OS3Exception { when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(""); objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT - .length(), 1, null, body); + .length(), 1, null, null, body); OzoneKeyDetails key = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME) .getKey(KEY_NAME); @@ -706,7 +706,7 @@ void testDirectoryCreation() throws IOException, // WHEN try (Response response = objectEndpoint.put(fsoBucket.getName(), path, - 0L, 0, "", null)) { + 0L, 0, "", null, null)) { assertEquals(HttpStatus.SC_OK, response.getStatus()); } @@ -721,12 +721,12 @@ void testDirectoryCreationOverFile() throws IOException, OS3Exception { final String path = "key"; final ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.put(FSO_BUCKET_NAME, path, CONTENT.length(), 0, "", body); + objectEndpoint.put(FSO_BUCKET_NAME, path, CONTENT.length(), 0, "", null, 
body); // WHEN final OS3Exception exception = assertThrows(OS3Exception.class, () -> objectEndpoint - .put(FSO_BUCKET_NAME, path + "/", 0, 0, "", null) + .put(FSO_BUCKET_NAME, path + "/", 0, 0, "", null, null) .close()); // THEN @@ -741,7 +741,7 @@ public void testPutEmptyObject() throws IOException, OS3Exception { ByteArrayInputStream body = new ByteArrayInputStream(emptyString.getBytes(UTF_8)); objectEndpoint.setHeaders(headersWithTags); - Response putResponse = objectEndpoint.put(BUCKET_NAME, KEY_NAME, emptyString.length(), 1, null, body); + Response putResponse = objectEndpoint.put(BUCKET_NAME, KEY_NAME, emptyString.length(), 1, null, null, body); assertEquals(200, putResponse.getStatus()); OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); assertEquals(0, keyDetails.getDataSize()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java new file mode 100644 index 00000000000..edf4c9856da --- /dev/null +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java @@ -0,0 +1,152 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; + +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.MultivaluedHashMap; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.UriInfo; +import java.io.ByteArrayInputStream; +import java.io.IOException; + +import static java.net.HttpURLConnection.HTTP_NOT_FOUND; +import static java.net.HttpURLConnection.HTTP_NOT_IMPLEMENTED; +import static java.net.HttpURLConnection.HTTP_NO_CONTENT; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_BUCKET; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_KEY; +import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_HEADER; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; 
+import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Tests for DeleteObjectTagging. + */ +public class TestObjectTaggingDelete { + + private static final String CONTENT = "0123456789"; + private static final String BUCKET_NAME = "b1"; + private static final String KEY_WITH_TAG = "keyWithTag"; + private HttpHeaders headers; + private ObjectEndpoint rest; + private OzoneClient client; + private ByteArrayInputStream body; + private ContainerRequestContext context; + + @BeforeEach + public void init() throws OS3Exception, IOException { + //GIVEN + OzoneConfiguration config = new OzoneConfiguration(); + client = new OzoneClientStub(); + client.getObjectStore().createS3Bucket(BUCKET_NAME); + + rest = new ObjectEndpoint(); + rest.setClient(client); + rest.setOzoneConfiguration(config); + headers = Mockito.mock(HttpHeaders.class); + rest.setHeaders(headers); + body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + // Create a key with object tags + Mockito.when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); + rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(), + 1, null, null, body); + + + context = Mockito.mock(ContainerRequestContext.class); + Mockito.when(context.getUriInfo()).thenReturn(Mockito.mock(UriInfo.class)); + Mockito.when(context.getUriInfo().getQueryParameters()) + .thenReturn(new MultivaluedHashMap<>()); + rest.setContext(context); + } + + @Test + public void testDeleteTagging() throws IOException, OS3Exception { + Response response = rest.delete(BUCKET_NAME, KEY_WITH_TAG, null, ""); + assertEquals(HTTP_NO_CONTENT, response.getStatus()); + + assertTrue(client.getObjectStore().getS3Bucket(BUCKET_NAME) + .getKey(KEY_WITH_TAG).getTags().isEmpty()); + } + + @Test + public void testDeleteTaggingNoKeyFound() throws Exception { + try { + rest.delete(BUCKET_NAME, "nonexistent", null, ""); + fail("Expected an OS3Exception to be thrown"); + } catch 
(OS3Exception ex) { + assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); + assertEquals(NO_SUCH_KEY.getCode(), ex.getCode()); + } + } + + @Test + public void testDeleteTaggingNoBucketFound() throws Exception { + try { + rest.delete("nonexistent", "nonexistent", null, ""); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); + assertEquals(NO_SUCH_BUCKET.getCode(), ex.getCode()); + } + } + + @Test + public void testDeleteObjectTaggingNotImplemented() throws Exception { + OzoneClient mockClient = mock(OzoneClient.class); + ObjectStore mockObjectStore = mock(ObjectStore.class); + OzoneVolume mockVolume = mock(OzoneVolume.class); + OzoneBucket mockBucket = mock(OzoneBucket.class); + + when(mockClient.getObjectStore()).thenReturn(mockObjectStore); + when(mockObjectStore.getS3Volume()).thenReturn(mockVolume); + when(mockVolume.getBucket("fsoBucket")).thenReturn(mockBucket); + + ObjectEndpoint endpoint = new ObjectEndpoint(); + endpoint.setClient(mockClient); + + doThrow(new OMException("DeleteObjectTagging is not currently supported for FSO directory", + ResultCodes.NOT_SUPPORTED_OPERATION)).when(mockBucket).deleteObjectTagging("dir/"); + + try { + endpoint.delete("fsoBucket", "dir/", null, ""); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_IMPLEMENTED, ex.getHttpCode()); + assertEquals(NOT_IMPLEMENTED.getCode(), ex.getCode()); + } + } +} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java new file mode 100644 index 00000000000..fe746f5b296 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.s3.endpoint.S3Tagging.Tag; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; + +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.MultivaluedHashMap; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.UriInfo; +import java.io.ByteArrayInputStream; +import java.io.IOException; + +import static java.net.HttpURLConnection.HTTP_NOT_FOUND; +import static java.net.HttpURLConnection.HTTP_OK; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_BUCKET; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_KEY; +import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_HEADER; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.fail; + +/** + * Tests for GetObjectTagging. 
+ */ +public class TestObjectTaggingGet { + + private static final String CONTENT = "0123456789"; + private static final String BUCKET_NAME = "b1"; + private static final String KEY_WITH_TAG = "keyWithTag"; + private ObjectEndpoint rest; + + @BeforeEach + public void init() throws OS3Exception, IOException { + //GIVEN + OzoneConfiguration config = new OzoneConfiguration(); + OzoneClient client = new OzoneClientStub(); + client.getObjectStore().createS3Bucket(BUCKET_NAME); + + rest = new ObjectEndpoint(); + rest.setClient(client); + rest.setOzoneConfiguration(config); + HttpHeaders headers = Mockito.mock(HttpHeaders.class); + rest.setHeaders(headers); + ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + // Create a key with object tags + Mockito.when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); + rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(), + 1, null, null, body); + + + ContainerRequestContext context = Mockito.mock(ContainerRequestContext.class); + Mockito.when(context.getUriInfo()).thenReturn(Mockito.mock(UriInfo.class)); + Mockito.when(context.getUriInfo().getQueryParameters()) + .thenReturn(new MultivaluedHashMap<>()); + rest.setContext(context); + } + + @Test + public void testGetTagging() throws IOException, OS3Exception { + //WHEN + Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, null, 0, null, ""); + + assertEquals(HTTP_OK, response.getStatus()); + S3Tagging s3Tagging = (S3Tagging) response.getEntity(); + assertNotNull(s3Tagging); + assertNotNull(s3Tagging.getTagSet()); + assertEquals(2, s3Tagging.getTagSet().getTags().size()); + for (Tag tag: s3Tagging.getTagSet().getTags()) { + if (tag.getKey().equals("tag1")) { + assertEquals("value1", tag.getValue()); + } else if (tag.getKey().equals("tag2")) { + assertEquals("value2", tag.getValue()); + } else { + fail("Unknown tag found"); + } + } + } + + @Test + public void testGetTaggingNoKeyFound() throws Exception { + try { + 
rest.get(BUCKET_NAME, "nonexistent", 0, null, 0, null, ""); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); + assertEquals(NO_SUCH_KEY.getCode(), ex.getCode()); + } + } + + @Test + public void testGetTaggingNoBucketFound() throws Exception { + try { + rest.get("nonexistent", "nonexistent", 0, null, 0, null, ""); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); + assertEquals(NO_SUCH_BUCKET.getCode(), ex.getCode()); + } + } +} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java new file mode 100644 index 00000000000..1067dea86ba --- /dev/null +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java @@ -0,0 +1,263 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.client.OzoneKeyDetails; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import javax.ws.rs.core.HttpHeaders; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Supplier; + +import static java.net.HttpURLConnection.HTTP_BAD_REQUEST; +import static java.net.HttpURLConnection.HTTP_NOT_FOUND; +import static java.net.HttpURLConnection.HTTP_NOT_IMPLEMENTED; +import static java.net.HttpURLConnection.HTTP_OK; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.MALFORMED_XML; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_BUCKET; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_KEY; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; +import static 
org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Tests for PutObjectTagging. + */ +public class TestObjectTaggingPut { + + private OzoneClient clientStub; + private ObjectEndpoint objectEndpoint; + + private static final String BUCKET_NAME = "b1"; + private static final String KEY_NAME = "key=value/1"; + + @BeforeEach + void setup() throws IOException, OS3Exception { + OzoneConfiguration config = new OzoneConfiguration(); + + //Create client stub and object store stub. + clientStub = new OzoneClientStub(); + + // Create bucket + clientStub.getObjectStore().createS3Bucket(BUCKET_NAME); + + // Create PutObject and setClient to OzoneClientStub + objectEndpoint = new ObjectEndpoint(); + objectEndpoint.setClient(clientStub); + objectEndpoint.setOzoneConfiguration(config); + + HttpHeaders headers = mock(HttpHeaders.class); + ByteArrayInputStream body = + new ByteArrayInputStream("".getBytes(UTF_8)); + objectEndpoint.setHeaders(headers); + + objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, null, body); + } + + @Test + public void testPutObjectTaggingWithEmptyBody() throws Exception { + try { + objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, "", + null); + fail(); + } catch (OS3Exception ex) { + assertEquals(HTTP_BAD_REQUEST, ex.getHttpCode()); + assertEquals(MALFORMED_XML.getCode(), ex.getCode()); + } + } + + @Test + public void testPutValidObjectTagging() throws Exception { + assertEquals(HTTP_OK, objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, + null, "", twoTags()).getStatus()); + OzoneKeyDetails keyDetails = + clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); + assertEquals(2, keyDetails.getTags().size()); + assertEquals("val1", keyDetails.getTags().get("tag1")); + assertEquals("val2", keyDetails.getTags().get("tag2")); + } + + @Test + public void testPutInvalidObjectTagging() throws Exception { + testInvalidObjectTagging(this::emptyBody, HTTP_BAD_REQUEST, 
MALFORMED_XML.getCode()); + testInvalidObjectTagging(this::invalidXmlStructure, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); + testInvalidObjectTagging(this::noTagSet, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); + testInvalidObjectTagging(this::emptyTags, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); + testInvalidObjectTagging(this::tagKeyNotSpecified, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); + testInvalidObjectTagging(this::tagValueNotSpecified, HTTP_BAD_REQUEST, MALFORMED_XML.getCode()); + } + + private void testInvalidObjectTagging(Supplier inputStream, + int expectedHttpCode, String expectedErrorCode) throws Exception { + try { + objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, "", + inputStream.get()); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(expectedHttpCode, ex.getHttpCode()); + assertEquals(expectedErrorCode, ex.getCode()); + } + } + + @Test + public void testPutObjectTaggingNoKeyFound() throws Exception { + try { + objectEndpoint.put(BUCKET_NAME, "nonexistent", 0, 1, + null, "", twoTags()); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); + assertEquals(NO_SUCH_KEY.getCode(), ex.getCode()); + } + } + + @Test + public void testPutObjectTaggingNoBucketFound() throws Exception { + try { + objectEndpoint.put("nonexistent", "nonexistent", 0, 1, + null, "", twoTags()); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); + assertEquals(NO_SUCH_BUCKET.getCode(), ex.getCode()); + } + } + + @Test + public void testPutObjectTaggingNotImplemented() throws Exception { + OzoneClient mockClient = mock(OzoneClient.class); + ObjectStore mockObjectStore = mock(ObjectStore.class); + OzoneVolume mockVolume = mock(OzoneVolume.class); + OzoneBucket mockBucket = mock(OzoneBucket.class); + + when(mockClient.getObjectStore()).thenReturn(mockObjectStore); + 
when(mockObjectStore.getS3Volume()).thenReturn(mockVolume); + when(mockVolume.getBucket("fsoBucket")).thenReturn(mockBucket); + + ObjectEndpoint endpoint = new ObjectEndpoint(); + Map twoTagsMap = new HashMap<>(); + twoTagsMap.put("tag1", "val1"); + twoTagsMap.put("tag2", "val2"); + endpoint.setClient(mockClient); + + doThrow(new OMException("PutObjectTagging is not currently supported for FSO directory", + ResultCodes.NOT_SUPPORTED_OPERATION)).when(mockBucket).putObjectTagging("dir/", twoTagsMap); + + try { + endpoint.put("fsoBucket", "dir/", 0, 1, null, "", twoTags()); + fail("Expected an OS3Exception to be thrown"); + } catch (OS3Exception ex) { + assertEquals(HTTP_NOT_IMPLEMENTED, ex.getHttpCode()); + assertEquals(NOT_IMPLEMENTED.getCode(), ex.getCode()); + } + } + + private InputStream emptyBody() { + return null; + } + + private InputStream invalidXmlStructure() { + String xml = + "" + + " " + + " "; + + return new ByteArrayInputStream(xml.getBytes(UTF_8)); + } + + private InputStream twoTags() { + String xml = + "" + + " " + + " " + + " tag1" + + " val1" + + " " + + " " + + " tag2" + + " val2" + + " " + + " " + + ""; + + return new ByteArrayInputStream(xml.getBytes(UTF_8)); + } + + private InputStream noTagSet() { + String xml = + "" + + ""; + return new ByteArrayInputStream(xml.getBytes(UTF_8)); + } + + private InputStream emptyTags() { + String xml = + "" + + " " + + " " + + ""; + + return new ByteArrayInputStream(xml.getBytes(UTF_8)); + } + + public InputStream tagKeyNotSpecified() { + String xml = + "" + + " " + + " " + + " val1" + + " " + + " " + + ""; + + return new ByteArrayInputStream(xml.getBytes(UTF_8)); + } + + public InputStream tagValueNotSpecified() { + String xml = + "" + + " " + + " " + + " tag1" + + " " + + " " + + ""; + + return new ByteArrayInputStream(xml.getBytes(UTF_8)); + } +} + diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java index fbff7648297..6341e3d178f 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java @@ -100,7 +100,7 @@ public void testPartUpload() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, body); + content.length(), 1, uploadID, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -122,7 +122,7 @@ public void testPartUploadWithOverride() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, body); + content.length(), 1, uploadID, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -131,7 +131,7 @@ public void testPartUploadWithOverride() throws Exception { // Upload part again with same part Number, the ETag should be changed. 
content = "Multipart Upload Changed"; response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, body); + content.length(), 1, uploadID, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); @@ -145,7 +145,7 @@ public void testPartUploadWithIncorrectUploadID() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, - "random", body); + "random", null, body); }); assertEquals("NoSuchUpload", ex.getCode()); assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); @@ -179,7 +179,7 @@ public void testPartUploadStreamContentLength() long contentLength = chunkedContent.length(); objectEndpoint.put(OzoneConsts.S3_BUCKET, keyName, contentLength, 1, - uploadID, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); + uploadID, null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); assertContentLength(uploadID, keyName, 15); } @@ -202,7 +202,7 @@ public void testPartUploadContentLength() throws IOException, OS3Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); REST.put(OzoneConsts.S3_BUCKET, keyName, - contentLength, 1, uploadID, body); + contentLength, 1, uploadID, null, body); assertContentLength(uploadID, keyName, content.length()); } @@ -243,7 +243,7 @@ public void testPartUploadMessageDigestResetDuringException() throws IOException new ByteArrayInputStream(content.getBytes(UTF_8)); try { objectEndpoint.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, body); + content.length(), 1, uploadID, null, body); fail("Should throw IOException"); } catch (IOException ignored) { // Verify that the message digest is reset so that the instance can be reused for the diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java index 28ce32e7470..507e997b29e 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java @@ -94,7 +94,7 @@ public void testPartUpload() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); response = REST.put(S3BUCKET, S3KEY, - content.length(), 1, uploadID, body); + content.length(), 1, uploadID, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -115,7 +115,7 @@ public void testPartUploadWithOverride() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); response = REST.put(S3BUCKET, S3KEY, - content.length(), 1, uploadID, body); + content.length(), 1, uploadID, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -124,7 +124,7 @@ public void testPartUploadWithOverride() throws Exception { // Upload part again with same part Number, the ETag should be changed. 
content = "Multipart Upload Changed"; response = REST.put(S3BUCKET, S3KEY, - content.length(), 1, uploadID, body); + content.length(), 1, uploadID, null, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); @@ -137,7 +137,7 @@ public void testPartUploadWithIncorrectUploadID() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); REST.put(S3BUCKET, S3KEY, content.length(), 1, - "random", body); + "random", null, body); }); assertEquals("NoSuchUpload", ex.getCode()); assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java index b74808de953..70e5665f170 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java @@ -38,12 +38,14 @@ import java.io.ByteArrayInputStream; import java.io.IOException; +import java.io.InputStream; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import static java.net.HttpURLConnection.HTTP_FORBIDDEN; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.anyLong; @@ -245,7 +247,7 @@ public void testGetKey() throws IOException { objectEndpoint.setOzoneConfiguration(conf); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.get( - "bucketName", "keyPath", 0, null, 1000, "marker")); + "bucketName", "keyPath", 0, null, 1000, "marker", null)); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -261,7 +263,7 @@ public void testPutKey() throws IOException { 
objectEndpoint.setOzoneConfiguration(conf); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - "bucketName", "keyPath", 1024, 0, null, + "bucketName", "keyPath", 1024, 0, null, null, new ByteArrayInputStream(new byte[]{}))); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -277,7 +279,7 @@ public void testDeleteKey() throws IOException { objectEndpoint.setOzoneConfiguration(conf); OS3Exception e = assertThrows(OS3Exception.class, () -> - objectEndpoint.delete("bucketName", "keyPath", null)); + objectEndpoint.delete("bucketName", "keyPath", null, null)); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -294,4 +296,44 @@ public void testMultiUploadKey() throws IOException { objectEndpoint.initializeMultipartUpload("bucketName", "keyPath")); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } + + @Test + public void testObjectTagging() throws Exception { + when(objectStore.getVolume(anyString())).thenReturn(volume); + when(objectStore.getS3Volume()).thenReturn(volume); + when(objectStore.getS3Bucket(anyString())).thenReturn(bucket); + when(volume.getBucket("bucketName")).thenReturn(bucket); + when(bucket.getObjectTagging(anyString())).thenThrow(exception); + doThrow(exception).when(bucket).putObjectTagging(anyString(), anyMap()); + doThrow(exception).when(bucket).deleteObjectTagging(anyString()); + + ObjectEndpoint objectEndpoint = new ObjectEndpoint(); + objectEndpoint.setClient(client); + + String xml = + "" + + " " + + " " + + " tag1" + + " val1" + + " " + + " " + + ""; + + InputStream tagInput = new ByteArrayInputStream(xml.getBytes(UTF_8)); + + OS3Exception e = assertThrows(OS3Exception.class, () -> + objectEndpoint.put("bucketName", "keyPath", 0, 1, + null, "", tagInput)); + assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); + + e = assertThrows(OS3Exception.class, () -> + objectEndpoint.delete("bucketName", "keyPath", "", "")); + assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); + + e = assertThrows(OS3Exception.class, () -> + 
objectEndpoint.get("bucketName", "keyPath", 0, null, + 0, null, "")); + assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); + } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java index d988b430230..6dbcada1df3 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java @@ -106,7 +106,7 @@ public void testUpload() throws Exception { byte[] keyContent = S3_COPY_EXISTING_KEY_CONTENT.getBytes(UTF_8); ByteArrayInputStream body = new ByteArrayInputStream(keyContent); - Response response = REST.put(S3BUCKET, S3KEY, 0, 0, null, body); + Response response = REST.put(S3BUCKET, S3KEY, 0, 0, null, null, body); assertEquals(200, response.getStatus()); } @@ -140,7 +140,7 @@ public void testUploadWithCopy() throws Exception { .forEach((k, v) -> when(headers.getHeaderString(k)).thenReturn(v)); REST.setHeaders(headers); - Response response = REST.put(S3BUCKET, S3KEY, 0, 0, null, null); + Response response = REST.put(S3BUCKET, S3KEY, 0, 0, null, null, null); assertEquals(200, response.getStatus()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java index 947b0986c8e..5bf4201b989 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java @@ -310,7 +310,7 @@ public void testCreateKeySuccess() throws Exception { new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); // Create the file keyEndpoint.put(bucketName, keyName, CONTENT - .length(), 1, null, body); + .length(), 
1, null, null, body); body.close(); long curMetric = metrics.getCreateKeySuccess(); assertEquals(1L, curMetric - oriMetric); @@ -322,7 +322,7 @@ public void testCreateKeyFailure() throws Exception { // Create the file in a bucket that does not exist OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.put( - "unknownBucket", keyName, CONTENT.length(), 1, null, null)); + "unknownBucket", keyName, CONTENT.length(), 1, null, null, null)); assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode()); long curMetric = metrics.getCreateKeyFailure(); assertEquals(1L, curMetric - oriMetric); @@ -334,7 +334,7 @@ public void testDeleteKeySuccess() throws Exception { long oriMetric = metrics.getDeleteKeySuccess(); bucket.createKey(keyName, 0).close(); - keyEndpoint.delete(bucketName, keyName, null); + keyEndpoint.delete(bucketName, keyName, null, null); long curMetric = metrics.getDeleteKeySuccess(); assertEquals(1L, curMetric - oriMetric); } @@ -343,7 +343,7 @@ public void testDeleteKeySuccess() throws Exception { public void testDeleteKeyFailure() throws Exception { long oriMetric = metrics.getDeleteKeyFailure(); OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.delete( - "unknownBucket", keyName, null)); + "unknownBucket", keyName, null, null)); assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode()); long curMetric = metrics.getDeleteKeyFailure(); assertEquals(1L, curMetric - oriMetric); @@ -358,9 +358,9 @@ public void testGetKeySuccess() throws Exception { new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); // Create the file keyEndpoint.put(bucketName, keyName, CONTENT - .length(), 1, null, body); + .length(), 1, null, null, body); // GET the key from the bucket - Response response = keyEndpoint.get(bucketName, keyName, 0, null, 0, null); + Response response = keyEndpoint.get(bucketName, keyName, 0, null, 0, null, null); StreamingOutput stream = (StreamingOutput) response.getEntity(); stream.write(new 
ByteArrayOutputStream()); long curMetric = metrics.getGetKeySuccess(); @@ -373,7 +373,7 @@ public void testGetKeyFailure() throws Exception { // Fetching a non-existent key OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.get( - bucketName, "unknownKey", 0, null, 0, null)); + bucketName, "unknownKey", 0, null, 0, null, null)); assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), e.getCode()); long curMetric = metrics.getGetKeyFailure(); assertEquals(1L, curMetric - oriMetric); @@ -407,7 +407,7 @@ public void testAbortMultiPartUploadSuccess() throws Exception { long oriMetric = metrics.getAbortMultiPartUploadSuccess(); // Abort the Upload Successfully by deleting the key using the Upload-Id - keyEndpoint.delete(bucketName, keyName, uploadID); + keyEndpoint.delete(bucketName, keyName, uploadID, null); long curMetric = metrics.getAbortMultiPartUploadSuccess(); assertEquals(1L, curMetric - oriMetric); @@ -419,7 +419,7 @@ public void testAbortMultiPartUploadFailure() throws Exception { // Fail the Abort Method by providing wrong uploadID OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.delete( - bucketName, keyName, "wrongId")); + bucketName, keyName, "wrongId", null)); assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode()); long curMetric = metrics.getAbortMultiPartUploadFailure(); assertEquals(1L, curMetric - oriMetric); @@ -466,7 +466,7 @@ public void testCreateMultipartKeySuccess() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); keyEndpoint.put(bucketName, keyName, CONTENT.length(), - 1, uploadID, body); + 1, uploadID, null, body); long curMetric = metrics.getCreateMultipartKeySuccess(); assertEquals(1L, curMetric - oriMetric); } @@ -475,7 +475,7 @@ public void testCreateMultipartKeySuccess() throws Exception { public void testCreateMultipartKeyFailure() throws Exception { long oriMetric = metrics.getCreateMultipartKeyFailure(); OS3Exception e = 
assertThrows(OS3Exception.class, () -> keyEndpoint.put( - bucketName, keyName, CONTENT.length(), 1, "randomId", null)); + bucketName, keyName, CONTENT.length(), 1, "randomId", null, null)); assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode()); long curMetric = metrics.getCreateMultipartKeyFailure(); assertEquals(1L, curMetric - oriMetric); @@ -490,7 +490,7 @@ public void testListPartsSuccess() throws Exception { // Listing out the parts by providing the uploadID keyEndpoint.get(bucketName, keyName, 0, - uploadID, 3, null); + uploadID, 3, null, null); long curMetric = metrics.getListPartsSuccess(); assertEquals(1L, curMetric - oriMetric); } @@ -501,7 +501,7 @@ public void testListPartsFailure() throws Exception { long oriMetric = metrics.getListPartsFailure(); // Listing out the parts by providing the uploadID after aborting OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.get( - bucketName, keyName, 0, "wrong_id", 3, null)); + bucketName, keyName, 0, "wrong_id", 3, null, null)); assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode()); long curMetric = metrics.getListPartsFailure(); assertEquals(1L, curMetric - oriMetric); @@ -522,14 +522,14 @@ public void testCopyObject() throws Exception { new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); keyEndpoint.put(bucketName, keyName, - CONTENT.length(), 1, null, body); + CONTENT.length(), 1, null, null, body); // Add copy header, and then call put when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( bucketName + "/" + urlEncode(keyName)); keyEndpoint.put(destBucket, destKey, CONTENT.length(), 1, - null, body); + null, null, body); long curMetric = metrics.getCopyObjectSuccess(); assertEquals(1L, curMetric - oriMetric); @@ -538,13 +538,113 @@ public void testCopyObject() throws Exception { // source and dest same when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(""); OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.put( - bucketName, keyName, 
CONTENT.length(), 1, null, body), + bucketName, keyName, CONTENT.length(), 1, null, null, body), "Test for CopyObjectMetric failed"); assertThat(e.getErrorMessage()).contains("This copy request is illegal"); curMetric = metrics.getCopyObjectFailure(); assertEquals(1L, curMetric - oriMetric); } + @Test + public void testPutObjectTaggingSuccess() throws Exception { + long oriMetric = metrics.getPutObjectTaggingSuccess(); + + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + // Create the file + keyEndpoint.put(bucketName, keyName, CONTENT + .length(), 1, null, null, body); + body.close(); + + // Put object tagging + keyEndpoint.put(bucketName, keyName, 0, 1, null, "", getPutTaggingBody()); + + long curMetric = metrics.getPutObjectTaggingSuccess(); + assertEquals(1L, curMetric - oriMetric); + } + + @Test + public void testPutObjectTaggingFailure() throws Exception { + long oriMetric = metrics.getPutObjectTaggingFailure(); + + // Put object tagging for nonexistent key + OS3Exception ex = assertThrows(OS3Exception.class, () -> + keyEndpoint.put(bucketName, "nonexistent", 0, 1, null, "", getPutTaggingBody()) + ); + assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), ex.getCode()); + + long curMetric = metrics.getPutObjectTaggingFailure(); + assertEquals(1L, curMetric - oriMetric); + } + + @Test + public void testGetObjectTaggingSuccess() throws Exception { + long oriMetric = metrics.getGetObjectTaggingSuccess(); + + // Create the file + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + keyEndpoint.put(bucketName, keyName, CONTENT + .length(), 1, null, null, body); + body.close(); + + // Put object tagging + keyEndpoint.put(bucketName, keyName, 0, 1, null, "", getPutTaggingBody()); + + // Get object tagging + keyEndpoint.get(bucketName, keyName, 0, + null, 0, null, ""); + + long curMetric = metrics.getGetObjectTaggingSuccess(); + assertEquals(1L, curMetric - oriMetric); + } + + @Test + public void 
testGetObjectTaggingFailure() throws Exception { + long oriMetric = metrics.getGetObjectTaggingFailure(); + + // Get object tagging for nonexistent key + OS3Exception ex = assertThrows(OS3Exception.class, () -> + keyEndpoint.get(bucketName, "nonexistent", 0, null, + 0, null, "")); + assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), ex.getCode()); + long curMetric = metrics.getGetObjectTaggingFailure(); + assertEquals(1L, curMetric - oriMetric); + } + + @Test + public void testDeleteObjectTaggingSuccess() throws Exception { + long oriMetric = metrics.getDeleteObjectTaggingSuccess(); + + // Create the file + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + keyEndpoint.put(bucketName, keyName, CONTENT + .length(), 1, null, null, body); + body.close(); + + // Put object tagging + keyEndpoint.put(bucketName, keyName, 0, 1, null, "", getPutTaggingBody()); + + // Delete object tagging + keyEndpoint.delete(bucketName, keyName, null, ""); + + long curMetric = metrics.getDeleteObjectTaggingSuccess(); + assertEquals(1L, curMetric - oriMetric); + } + + @Test + public void testDeleteObjectTaggingFailure() throws Exception { + long oriMetric = metrics.getDeleteObjectTaggingFailure(); + + // Delete object tagging for nonexistent key + OS3Exception ex = assertThrows(OS3Exception.class, () -> + keyEndpoint.delete(bucketName, "nonexistent", null, "")); + assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), ex.getCode()); + long curMetric = metrics.getDeleteObjectTaggingFailure(); + assertEquals(1L, curMetric - oriMetric); + } + private OzoneClient createClientWithKeys(String... 
keys) throws IOException { for (String key : keys) { bucket.createKey(key, 0).close(); @@ -567,4 +667,18 @@ private String initiateMultipartUpload(String bktName, String key) } return "Invalid-Id"; } + + private static InputStream getPutTaggingBody() { + String xml = + "<Tagging xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">" + + "   <TagSet>" + + "      <Tag>" + + "         <Key>tag1</Key>" + + "         <Value>val1</Value>" + + "      </Tag>" + + "   </TagSet>" + + "</Tagging>"; + + return new ByteArrayInputStream(xml.getBytes(UTF_8)); + } }