Skip to content

Commit

Permalink
Storage and retrieval of headers like Content-Disposition
Browse files Browse the repository at this point in the history
  • Loading branch information
afranken committed Jun 17, 2023
1 parent 0a9c368 commit a669a8e
Show file tree
Hide file tree
Showing 15 changed files with 203 additions and 100 deletions.
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright 2017-2022 Adobe.
* Copyright 2017-2023 Adobe.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand All @@ -21,12 +21,17 @@ import org.assertj.core.api.Assertions
import org.assertj.core.api.Assertions.assertThat
import org.junit.jupiter.api.Test
import org.junit.jupiter.api.TestInfo
import org.springframework.http.ContentDisposition
import software.amazon.awssdk.core.sync.RequestBody
import software.amazon.awssdk.services.s3.model.GetObjectRequest
import software.amazon.awssdk.services.s3.model.HeadObjectRequest
import software.amazon.awssdk.services.s3.model.PutObjectRequest
import software.amazon.awssdk.services.s3.model.S3Exception
import java.io.File
import java.io.FileInputStream
import java.io.InputStream
import java.time.Instant
import java.time.temporal.ChronoUnit

internal class GetPutDeleteObjectV2IT : S3TestBase() {

Expand Down Expand Up @@ -63,6 +68,61 @@ internal class GetPutDeleteObjectV2IT : S3TestBase() {
assertThat(object2.response().eTag()).isEqualTo(object2Again.response().eTag())
}

@Test
fun testPutGetHeadObject_storeHeaders(testInfo: TestInfo) {
  // Round-trip check: storage-related headers supplied on PUT must be echoed
  // back by both GET and HEAD on the same object.
  val bucket = givenRandomBucketV2()
  val sourceFile = File(UPLOAD_FILE_NAME)

  // Header values to store alongside the object.
  val disposition = ContentDisposition.formData()
    .name("file")
    .filename("sampleFile.txt")
    .build()
    .toString()
  val expiresAt = Instant.now()
  val contentEncoding = "SomeEncoding"
  val language = "SomeLanguage"
  val cacheControlValue = "SomeCacheControl"

  val putRequest = PutObjectRequest.builder()
    .bucket(bucket)
    .key(UPLOAD_FILE_NAME)
    .contentDisposition(disposition)
    .contentEncoding(contentEncoding)
    .expires(expiresAt)
    .contentLanguage(language)
    .cacheControl(cacheControlValue)
    .build()
  s3ClientV2.putObject(putRequest, RequestBody.fromFile(sourceFile))

  // GET must return every stored header.
  val getResponse = getObjectV2(bucket, UPLOAD_FILE_NAME).response()
  assertThat(getResponse.contentDisposition()).isEqualTo(disposition)
  assertThat(getResponse.contentEncoding()).isEqualTo(contentEncoding)
  // Expires is compared at second precision, see
  // https://www.rfc-editor.org/rfc/rfc7234#section-5.3
  // https://www.rfc-editor.org/rfc/rfc7231#section-7.1.1.1
  assertThat(getResponse.expires()).isEqualTo(expiresAt.truncatedTo(ChronoUnit.SECONDS))
  assertThat(getResponse.contentLanguage()).isEqualTo(language)
  assertThat(getResponse.cacheControl()).isEqualTo(cacheControlValue)

  // HEAD must return the same stored headers.
  val headResponse = s3ClientV2.headObject(
    HeadObjectRequest.builder()
      .bucket(bucket)
      .key(UPLOAD_FILE_NAME)
      .build()
  )
  assertThat(headResponse.contentDisposition()).isEqualTo(disposition)
  assertThat(headResponse.contentEncoding()).isEqualTo(contentEncoding)
  assertThat(headResponse.expires()).isEqualTo(expiresAt.truncatedTo(ChronoUnit.SECONDS))
  assertThat(headResponse.contentLanguage()).isEqualTo(language)
  assertThat(headResponse.cacheControl()).isEqualTo(cacheControlValue)
}

@Test
@S3VerifiedSuccess(year = 2022)
fun testGetObject_successWithMatchingEtag(testInfo: TestInfo) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,9 +30,9 @@
import static com.adobe.testing.s3mock.util.AwsHttpParameters.PART_NUMBER;
import static com.adobe.testing.s3mock.util.AwsHttpParameters.UPLOADS;
import static com.adobe.testing.s3mock.util.AwsHttpParameters.UPLOAD_ID;
import static com.adobe.testing.s3mock.util.HeaderUtil.getUserMetadata;
import static com.adobe.testing.s3mock.util.HeaderUtil.isV4ChunkedWithSigningEnabled;
import static org.springframework.http.HttpHeaders.CONTENT_ENCODING;
import static com.adobe.testing.s3mock.util.HeaderUtil.parseStoreHeaders;
import static com.adobe.testing.s3mock.util.HeaderUtil.parseUserMetadata;
import static org.springframework.http.HttpHeaders.CONTENT_TYPE;
import static org.springframework.http.MediaType.APPLICATION_XML_VALUE;

Expand Down Expand Up @@ -322,16 +322,16 @@ public ResponseEntity<InitiateMultipartUploadResult> createMultipartUpload(
value = X_AMZ_SERVER_SIDE_ENCRYPTION_AWS_KMS_KEY_ID,
required = false) String kmsKeyId,
@RequestHeader(value = CONTENT_TYPE, required = false) String contentType,
@RequestHeader(value = CONTENT_ENCODING, required = false) String contentEncoding,
@RequestHeader HttpHeaders httpHeaders) {
bucketService.verifyBucketExists(bucketName);

Map<String, String> userMetadata = getUserMetadata(httpHeaders);
Map<String, String> userMetadata = parseUserMetadata(httpHeaders);
Map<String, String> storeHeaders = parseStoreHeaders(httpHeaders);

String uploadId = UUID.randomUUID().toString();
InitiateMultipartUploadResult result =
multipartService.prepareMultipartUpload(bucketName, key.getKey(),
contentType, contentEncoding, uploadId,
contentType, storeHeaders, uploadId,
DEFAULT_OWNER, DEFAULT_OWNER, userMetadata);

return ResponseEntity.ok(result);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -44,10 +44,10 @@
import static com.adobe.testing.s3mock.util.HeaderUtil.createEncryptionHeaders;
import static com.adobe.testing.s3mock.util.HeaderUtil.createOverrideHeaders;
import static com.adobe.testing.s3mock.util.HeaderUtil.createUserMetadataHeaders;
import static com.adobe.testing.s3mock.util.HeaderUtil.getUserMetadata;
import static com.adobe.testing.s3mock.util.HeaderUtil.isV4ChunkedWithSigningEnabled;
import static com.adobe.testing.s3mock.util.HeaderUtil.parseMediaType;
import static org.springframework.http.HttpHeaders.CONTENT_ENCODING;
import static com.adobe.testing.s3mock.util.HeaderUtil.parseStoreHeaders;
import static com.adobe.testing.s3mock.util.HeaderUtil.parseUserMetadata;
import static org.springframework.http.HttpHeaders.CONTENT_TYPE;
import static org.springframework.http.HttpHeaders.IF_MATCH;
import static org.springframework.http.HttpHeaders.IF_NONE_MATCH;
Expand Down Expand Up @@ -171,8 +171,8 @@ public ResponseEntity<Void> headObject(@PathVariable String bucketName,
objectService.verifyObjectMatching(match, noneMatch, s3ObjectMetadata);
return ResponseEntity.ok()
.eTag(s3ObjectMetadata.getEtag())
.header(HttpHeaders.CONTENT_ENCODING, s3ObjectMetadata.getContentEncoding())
.header(HttpHeaders.ACCEPT_RANGES, RANGES_BYTES)
.headers(headers -> headers.setAll(s3ObjectMetadata.getStoreHeaders()))
.headers(headers -> headers.setAll(createUserMetadataHeaders(s3ObjectMetadata)))
.headers(headers -> headers.setAll(createEncryptionHeaders(s3ObjectMetadata)))
.lastModified(s3ObjectMetadata.getLastModified())
Expand Down Expand Up @@ -252,8 +252,8 @@ public ResponseEntity<StreamingResponseBody> getObject(@PathVariable String buck
return ResponseEntity
.ok()
.eTag(s3ObjectMetadata.getEtag())
.header(HttpHeaders.CONTENT_ENCODING, s3ObjectMetadata.getContentEncoding())
.header(HttpHeaders.ACCEPT_RANGES, RANGES_BYTES)
.headers(headers -> headers.setAll(s3ObjectMetadata.getStoreHeaders()))
.headers(headers -> headers.setAll(createUserMetadataHeaders(s3ObjectMetadata)))
.headers(headers -> headers.setAll(createEncryptionHeaders(s3ObjectMetadata)))
.lastModified(s3ObjectMetadata.getLastModified())
Expand Down Expand Up @@ -529,7 +529,6 @@ public ResponseEntity<Void> putObject(@PathVariable String bucketName,
value = X_AMZ_SERVER_SIDE_ENCRYPTION_AWS_KMS_KEY_ID,
required = false) String kmsKeyId,
@RequestHeader(name = X_AMZ_TAGGING, required = false) List<Tag> tags,
@RequestHeader(value = CONTENT_ENCODING, required = false) String contentEncoding,
@RequestHeader(value = CONTENT_TYPE, required = false) String contentType,
@RequestHeader(value = CONTENT_MD5, required = false) String contentMd5,
@RequestHeader(value = X_AMZ_CONTENT_SHA256, required = false) String sha256Header,
Expand All @@ -540,12 +539,13 @@ public ResponseEntity<Void> putObject(@PathVariable String bucketName,
InputStream stream = objectService.verifyMd5(inputStream, contentMd5, sha256Header);
//TODO: need to extract owner from headers
Owner owner = Owner.DEFAULT_OWNER;
Map<String, String> userMetadata = getUserMetadata(headers);
Map<String, String> userMetadata = parseUserMetadata(headers);
Map<String, String> storeHeaders = parseStoreHeaders(headers);
S3ObjectMetadata s3ObjectMetadata =
objectService.putS3Object(bucketName,
key.getKey(),
parseMediaType(contentType).toString(),
contentEncoding,
storeHeaders,
stream,
isV4ChunkedWithSigningEnabled(sha256Header),
userMetadata,
Expand Down Expand Up @@ -612,7 +612,7 @@ public ResponseEntity<CopyObjectResult> copyObject(@PathVariable String bucketNa

Map<String, String> metadata = Collections.emptyMap();
if (MetadataDirective.REPLACE == metadataDirective) {
metadata = getUserMetadata(httpHeaders);
metadata = parseUserMetadata(httpHeaders);
}

//TODO: this is potentially illegal on S3. S3 throws a 400:
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright 2017-2022 Adobe.
* Copyright 2017-2023 Adobe.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -200,7 +200,7 @@ public CompleteMultipartUploadResult completeMultipartUpload(String bucketName,
* @param bucketName Bucket to upload object in
* @param key object to upload
* @param contentType the content type
* @param contentEncoding the content encoding
* @param storeHeaders various headers to store
* @param uploadId id of the upload
* @param owner owner of the upload
* @param initiator initiator of the upload
Expand All @@ -209,13 +209,13 @@ public CompleteMultipartUploadResult completeMultipartUpload(String bucketName,
* @return upload result
*/
public InitiateMultipartUploadResult prepareMultipartUpload(String bucketName, String key,
String contentType, String contentEncoding, String uploadId,
String contentType, Map<String, String> storeHeaders, String uploadId,
Owner owner, Owner initiator, Map<String, String> userMetadata) {
BucketMetadata bucketMetadata = bucketStore.getBucketMetadata(bucketName);
UUID id = bucketStore.addToBucket(key, bucketName);

try {
multipartStore.prepareMultipartUpload(bucketMetadata, key, id, contentType, contentEncoding,
multipartStore.prepareMultipartUpload(bucketMetadata, key, id, contentType, storeHeaders,
uploadId, owner, initiator, userMetadata);
return new InitiateMultipartUploadResult(bucketName, key, uploadId);
} catch (Exception e) {
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright 2017-2022 Adobe.
* Copyright 2017-2023 Adobe.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -43,15 +43,11 @@
import com.adobe.testing.s3mock.store.S3ObjectMetadata;
import com.adobe.testing.s3mock.util.AwsChunkedDecodingInputStream;
import com.adobe.testing.s3mock.util.DigestUtil;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.time.Instant;
import java.util.List;
import java.util.Map;
Expand Down Expand Up @@ -122,7 +118,7 @@ public CopyObjectResult copyS3Object(String sourceBucketName,
* @param bucketName Bucket to store the object in.
* @param key object key to be stored.
* @param contentType The files Content Type.
* @param contentEncoding The files Content Encoding.
* @param storeHeaders various headers to store
* @param dataStream The File as InputStream.
* @param useV4ChunkedWithSigningFormat If {@code true}, V4-style signing is enabled.
* @param userMetadata User metadata to store for this object, will be available for the
Expand All @@ -135,7 +131,7 @@ public CopyObjectResult copyS3Object(String sourceBucketName,
public S3ObjectMetadata putS3Object(String bucketName,
String key,
String contentType,
String contentEncoding,
Map<String, String> storeHeaders,
InputStream dataStream,
boolean useV4ChunkedWithSigningFormat,
Map<String, String> userMetadata,
Expand All @@ -148,7 +144,7 @@ public S3ObjectMetadata putS3Object(String bucketName,
if (id == null) {
id = bucketStore.addToBucket(key, bucketName);
}
return objectStore.storeS3ObjectMetadata(bucketMetadata, id, key, contentType, contentEncoding,
return objectStore.storeS3ObjectMetadata(bucketMetadata, id, key, contentType, storeHeaders,
dataStream, useV4ChunkedWithSigningFormat, userMetadata, encryption, kmsKeyId, null, tags,
owner);
}
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright 2017-2022 Adobe.
* Copyright 2017-2023 Adobe.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -77,7 +77,7 @@ public MultipartStore(boolean retainFilesOnExit, ObjectStore objectStore) {
* @param key object to upload
* @param id ID of the object
* @param contentType the content type
* @param contentEncoding the content encoding
* @param storeHeaders various headers to store
* @param uploadId id of the upload
* @param owner owner of the upload
* @param initiator initiator of the upload
Expand All @@ -86,7 +86,7 @@ public MultipartStore(boolean retainFilesOnExit, ObjectStore objectStore) {
* @return upload result
*/
public MultipartUpload prepareMultipartUpload(BucketMetadata bucket, String key, UUID id,
String contentType, String contentEncoding, String uploadId,
String contentType, Map<String, String> storeHeaders, String uploadId,
Owner owner, Owner initiator, Map<String, String> userMetadata) {
if (!createPartsFolder(bucket, id, uploadId)) {
LOG.error("Directories for storing multipart uploads couldn't be created. bucket={}, key={}, "
Expand All @@ -97,7 +97,7 @@ public MultipartUpload prepareMultipartUpload(BucketMetadata bucket, String key,
MultipartUpload upload =
new MultipartUpload(key, uploadId, owner, initiator, new Date());
uploadIdToInfo.put(uploadId, new MultipartUploadInfo(upload,
contentType, contentEncoding, userMetadata, bucket.getName()));
contentType, storeHeaders, userMetadata, bucket.getName()));

return upload;
}
Expand Down Expand Up @@ -223,7 +223,7 @@ public String completeMultipartUpload(BucketMetadata bucket, String key, UUID id
id,
key,
uploadInfo.contentType,
uploadInfo.contentEncoding,
uploadInfo.storeHeaders,
inputStream,
false, //TODO: no signing?
uploadInfo.userMetadata,
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright 2017-2022 Adobe.
* Copyright 2017-2023 Adobe.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand All @@ -26,17 +26,17 @@ class MultipartUploadInfo {

final MultipartUpload upload;
final String contentType;
final String contentEncoding;
final Map<String, String> userMetadata;
final Map<String, String> storeHeaders;
final String bucket;

MultipartUploadInfo(final MultipartUpload upload, final String contentType,
final String contentEncoding,
final Map<String, String> storeHeaders,
final Map<String, String> userMetadata,
String bucket) {
this.upload = upload;
this.contentType = contentType;
this.contentEncoding = contentEncoding;
this.storeHeaders = storeHeaders;
this.userMetadata = userMetadata;
this.bucket = bucket;
}
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright 2017-2022 Adobe.
* Copyright 2017-2023 Adobe.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -88,7 +88,7 @@ public ObjectStore(boolean retainFilesOnExit,
* @param id object ID
* @param key object key to be stored.
* @param contentType The Content Type.
* @param contentEncoding The Content Encoding.
* @param storeHeaders Various headers to store, like Content Encoding.
* @param dataStream The InputStream to store.
* @param useV4ChunkedWithSigningFormat If {@code true}, V4-style signing is enabled.
* @param userMetadata User metadata to store for this object, will be available for the
Expand All @@ -104,7 +104,7 @@ public S3ObjectMetadata storeS3ObjectMetadata(BucketMetadata bucket,
UUID id,
String key,
String contentType,
String contentEncoding,
Map<String, String> storeHeaders,
InputStream dataStream,
boolean useV4ChunkedWithSigningFormat,
Map<String, String> userMetadata,
Expand All @@ -119,7 +119,7 @@ public S3ObjectMetadata storeS3ObjectMetadata(BucketMetadata bucket,
s3ObjectMetadata.setId(id);
s3ObjectMetadata.setKey(key);
s3ObjectMetadata.setContentType(contentType);
s3ObjectMetadata.setContentEncoding(contentEncoding);
s3ObjectMetadata.setStoreHeaders(storeHeaders);
s3ObjectMetadata.setUserMetadata(userMetadata);
s3ObjectMetadata.setTags(tags);
s3ObjectMetadata.setEncrypted(encrypted);
Expand Down Expand Up @@ -272,7 +272,7 @@ public CopyObjectResult copyS3Object(BucketMetadata sourceBucket,
destinationId,
destinationKey,
sourceObject.getContentType(),
sourceObject.getContentEncoding(),
sourceObject.getStoreHeaders(),
inputStream,
false,
userMetadata == null || userMetadata.isEmpty()
Expand Down
Loading

0 comments on commit a669a8e

Please sign in to comment.