Skip to content

Commit

Permalink
STG93 Added ACL response headers and x-ms-upn request header (#38450)
Browse files Browse the repository at this point in the history
* implementation

* reformatting context logic, acl test, non-functional upn test

* edit to parseList in PathAccessControlEntry

* style

* made upn header appear in request and finished upn test

* fixing issue with pipeline

* making upn context adjustment its own method and editing its usage

* fixing pipeline again

* fixing upn test

* resolving comments
  • Loading branch information
ibrandes authored Jan 27, 2024
1 parent 8aa433a commit de9c095
Show file tree
Hide file tree
Showing 23 changed files with 716 additions and 64 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,15 @@ public void addPolicyForBlobServiceClientBuilder() {

HttpPipeline pipeline = blobServiceClient.getHttpPipeline();
assertTrue(pipeline.getPolicyCount() >= 10);
assertEquals(SleuthHttpPolicy.class, pipeline.getPolicy(6).getClass());
boolean sleuthPolicyFound = false;
int policyCount = pipeline.getPolicyCount();
for (int i = 0; i < policyCount; i++) {
if (SleuthHttpPolicy.class.equals(pipeline.getPolicy(i).getClass())) {
sleuthPolicyFound = true;
break;
}
}
assertTrue(sleuthPolicyFound);
}

@Test
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
import com.azure.core.http.HttpPipeline;
import com.azure.core.http.HttpPipelineBuilder;
import com.azure.core.http.policy.AddDatePolicy;
import com.azure.core.http.policy.AddHeadersFromContextPolicy;
import com.azure.core.http.policy.AddHeadersPolicy;
import com.azure.core.http.policy.AzureSasCredentialPolicy;
import com.azure.core.http.policy.BearerTokenAuthenticationPolicy;
Expand Down Expand Up @@ -104,6 +105,8 @@ public static HttpPipeline buildPipeline(

policies.add(new AddDatePolicy());

policies.add(new AddHeadersFromContextPolicy());

// We need to place this policy right before the credential policy since headers may affect the string to sign
// of the request.
HttpHeaders headers = CoreUtils.createHttpHeadersFromClientOptions(clientOptions);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -368,7 +368,6 @@ public void pageBlobClientBuilderCheck() {
.buildPageBlobAsyncClient());
}

//todo isbr getEncryptionScope is protected **
@Test
public void getEncryptionScopeClient() {
String newEncryptionScope = "newtestscope";
Expand Down
2 changes: 1 addition & 1 deletion sdk/storage/azure-storage-file-datalake/assets.json
Original file line number Diff line number Diff line change
Expand Up @@ -2,5 +2,5 @@
"AssetsRepo": "Azure/azure-sdk-assets",
"AssetsRepoPrefixPath": "java",
"TagPrefix": "java/storage/azure-storage-file-datalake",
"Tag": "java/storage/azure-storage-file-datalake_454e33b26b"
"Tag": "java/storage/azure-storage-file-datalake_3bdc50f65a"
}
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@
import com.azure.storage.common.implementation.StorageImplUtils;
import com.azure.storage.common.implementation.UploadUtils;
import com.azure.storage.file.datalake.implementation.models.InternalDataLakeFileOpenInputStreamResult;
import com.azure.storage.file.datalake.implementation.util.BuilderHelper;
import com.azure.storage.file.datalake.implementation.util.DataLakeImplUtils;
import com.azure.storage.file.datalake.implementation.util.ModelHelper;
import com.azure.storage.file.datalake.models.CustomerProvidedKey;
Expand All @@ -52,6 +53,7 @@
import com.azure.storage.file.datalake.options.FileParallelUploadOptions;
import com.azure.storage.file.datalake.options.FileQueryOptions;
import com.azure.storage.file.datalake.options.FileScheduleDeletionOptions;
import com.azure.storage.file.datalake.options.ReadToFileOptions;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

Expand Down Expand Up @@ -1135,6 +1137,8 @@ public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStream
* @throws DataLakeStorageException If a storage service error occurred.
*/
public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStreamOptions options, Context context) {
context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUpn(), context);

BlobInputStreamOptions convertedOptions = Transforms.toBlobInputStreamOptions(options);
BlobInputStream inputStream = blockBlobClient.openInputStream(convertedOptions, context);
return new InternalDataLakeFileOpenInputStreamResult(inputStream,
Expand Down Expand Up @@ -1212,6 +1216,33 @@ public PathProperties readToFile(String filePath) {
return readToFile(filePath, false);
}

/**
 * Reads the entire file into a file specified by the path.
 *
 * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
 * will be thrown.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile#ReadToFileOptions -->
 * <pre>
 * client.readToFile&#40;new ReadToFileOptions&#40;&#41;.setFilePath&#40;file&#41;&#41;;
 * System.out.println&#40;&quot;Completed download to file&quot;&#41;;
 * </pre>
 * <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile#ReadToFileOptions -->
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param options {@link ReadToFileOptions}
 * @return The file properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(ReadToFileOptions options) {
    // Delegate to the overwrite-aware overload with overwriting disabled (the default).
    final boolean overwrite = false;
    return readToFile(options, overwrite);
}

/**
* Reads the entire file into a file specified by the path.
*
Expand Down Expand Up @@ -1251,6 +1282,44 @@ public PathProperties readToFile(String filePath, boolean overwrite) {
.getValue();
}

/**
 * Reads the entire file into a file specified by the path.
 *
 * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown.</p>
 *
 * <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFile#ReadToFileOptions-boolean -->
 * <pre>
 * boolean overwrite1 = false; &#47;&#47; Default value
 * client.readToFile&#40;new ReadToFileOptions&#40;&#41;.setFilePath&#40;file&#41;, overwrite1&#41;;
 * System.out.println&#40;&quot;Completed download to file&quot;&#41;;
 * </pre>
 * <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFile#ReadToFileOptions-boolean -->
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob">Azure Docs</a></p>
 *
 * @param options {@link ReadToFileOptions}
 * @param overwrite Whether to overwrite the file, should the file exist.
 * @return The file properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties readToFile(ReadToFileOptions options, boolean overwrite) {
    if (overwrite) {
        // Overwriting requires replacing the default open options: CREATE creates the file when it is
        // absent, and TRUNCATE_EXISTING resets an existing file's length to 0 when opened for WRITE.
        Set<OpenOption> overwriteOpenOptions = new HashSet<>();
        overwriteOpenOptions.add(StandardOpenOption.CREATE);
        overwriteOpenOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
        overwriteOpenOptions.add(StandardOpenOption.READ);
        overwriteOpenOptions.add(StandardOpenOption.WRITE);
        // NOTE(review): this mutates the caller-supplied options object — confirm that is intended.
        options.setOpenOptions(overwriteOpenOptions);
    }
    return readToFileWithResponse(options, null, Context.NONE).getValue();
}

/**
* Reads the entire file into a file specified by the path.
*
Expand Down Expand Up @@ -1302,7 +1371,56 @@ public Response<PathProperties> readToFileWithResponse(String filePath, FileRang
.setRequestConditions(Transforms.toBlobRequestConditions(requestConditions))
.setRetrieveContentRangeMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout,
context);
return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue()));
return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
}, LOGGER);
}

/**
 * Reads the entire file into a file specified by the path.
 *
 * <p>By default the file will be created and must not exist, if the file already exists a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions} </p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <!-- src_embed com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse#ReadToFileOptions-Duration-Context -->
 * <pre>
 * ReadToFileOptions options = new ReadToFileOptions&#40;&#41;;
 * options.setRange&#40;new FileRange&#40;1024, 2048L&#41;&#41;;
 * options.setDownloadRetryOptions&#40;new DownloadRetryOptions&#40;&#41;.setMaxRetryRequests&#40;5&#41;&#41;;
 * options.setOpenOptions&#40;new HashSet&lt;&gt;&#40;Arrays.asList&#40;StandardOpenOption.CREATE_NEW,
 *     StandardOpenOption.WRITE, StandardOpenOption.READ&#41;&#41;&#41;; &#47;&#47;Default options
 * options.setParallelTransferOptions&#40;new ParallelTransferOptions&#40;&#41;.setBlockSizeLong&#40;4L * Constants.MB&#41;&#41;;
 * options.setDataLakeRequestConditions&#40;null&#41;;
 * options.setRangeGetContentMd5&#40;false&#41;;
 *
 * client.readToFileWithResponse&#40;options, timeout, new Context&#40;key2, value2&#41;&#41;;
 * System.out.println&#40;&quot;Completed download to file&quot;&#41;;
 * </pre>
 * <!-- end com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse#ReadToFileOptions-Duration-Context -->
 *
 * @param options {@link ReadToFileOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the file properties and metadata.
 * @throws UncheckedIOException If an I/O error occurs.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> readToFileWithResponse(ReadToFileOptions options, Duration timeout, Context context) {
    // Attach the x-ms-upn request header when the caller asked for user principal names; the
    // supplier tolerates a null options object.
    final Context contextWithUpn = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUpn(), context);

    return DataLakeImplUtils.returnOrConvertException(() -> {
        // Translate the DataLake read options into their blob-layer equivalents.
        BlobDownloadToFileOptions blobOptions = new BlobDownloadToFileOptions(options.getFilePath());
        blobOptions.setRange(Transforms.toBlobRange(options.getRange()));
        blobOptions.setParallelTransferOptions(options.getParallelTransferOptions());
        blobOptions.setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(options.getDownloadRetryOptions()));
        blobOptions.setRequestConditions(Transforms.toBlobRequestConditions(options.getDataLakeRequestConditions()));
        blobOptions.setRetrieveContentRangeMd5(options.isRangeGetContentMd5());
        blobOptions.setOpenOptions(options.getOpenOptions());

        Response<BlobProperties> blobResponse
            = blockBlobClient.downloadToFileWithResponse(blobOptions, timeout, contextWithUpn);
        // Response headers are forwarded so ACL/owner/group/permissions can be surfaced on PathProperties.
        return new SimpleResponse<>(blobResponse, Transforms.toPathProperties(blobResponse.getValue(), blobResponse));
    }, LOGGER);
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
import com.azure.storage.file.datalake.implementation.models.CpkInfo;
import com.azure.storage.file.datalake.implementation.models.PathSetAccessControlRecursiveMode;
import com.azure.storage.file.datalake.implementation.util.DataLakeImplUtils;
import com.azure.storage.file.datalake.implementation.util.BuilderHelper;
import com.azure.storage.file.datalake.models.AccessControlChangeResult;
import com.azure.storage.file.datalake.models.CustomerProvidedKey;
import com.azure.storage.file.datalake.models.DataLakeAclChangeFailedException;
Expand All @@ -34,6 +35,7 @@
import com.azure.storage.file.datalake.models.UserDelegationKey;
import com.azure.storage.file.datalake.options.DataLakePathCreateOptions;
import com.azure.storage.file.datalake.options.DataLakePathDeleteOptions;
import com.azure.storage.file.datalake.options.PathGetPropertiesOptions;
import com.azure.storage.file.datalake.options.PathRemoveAccessControlRecursiveOptions;
import com.azure.storage.file.datalake.options.PathSetAccessControlRecursiveOptions;
import com.azure.storage.file.datalake.options.PathUpdateAccessControlRecursiveOptions;
Expand Down Expand Up @@ -1144,7 +1146,32 @@ public Response<PathAccessControl> getAccessControlWithResponse(boolean userPrin
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties getProperties() {
    // The null must be typed: the cast selects the DataLakeRequestConditions overload rather than
    // the PathGetPropertiesOptions one.
    DataLakeRequestConditions noConditions = null;
    return getPropertiesWithResponse(noConditions, null, Context.NONE).getValue();
}

/**
 * Returns the resource's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getProperties#PathGetPropertiesOptions -->
 * <pre>
 * PathGetPropertiesOptions options = new PathGetPropertiesOptions&#40;&#41;.setUpn&#40;true&#41;;
 *
 * System.out.printf&#40;&quot;Creation Time: %s, Size: %d%n&quot;, client.getProperties&#40;options&#41;.getCreationTime&#40;&#41;,
 *     client.getProperties&#40;options&#41;.getFileSize&#40;&#41;&#41;;
 * </pre>
 * <!-- end com.azure.storage.file.datalake.DataLakePathClient.getProperties#PathGetPropertiesOptions -->
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob-properties">Azure Docs</a></p>
 *
 * @param options {@link PathGetPropertiesOptions}
 * @return The resource properties and metadata.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PathProperties getProperties(PathGetPropertiesOptions options) {
    // Delegate to the with-response overload and unwrap the payload.
    Response<PathProperties> response = getPropertiesWithResponse(options, null, Context.NONE);
    return response.getValue();
}

/**
Expand Down Expand Up @@ -1182,6 +1209,45 @@ public Response<PathProperties> getPropertiesWithResponse(DataLakeRequestConditi
}, LOGGER);
}

/**
 * Returns the resource's metadata and properties.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <!-- src_embed com.azure.storage.file.datalake.DataLakePathClient.getPropertiesWithResponse#PathGetPropertiesOptions-Duration-Context -->
 * <pre>
 * PathGetPropertiesOptions options = new PathGetPropertiesOptions&#40;&#41;.setUpn&#40;true&#41;;
 *
 * Response&lt;PathProperties&gt; response2 = client.getPropertiesWithResponse&#40;options, timeout,
 *     new Context&#40;key2, value2&#41;&#41;;
 *
 * System.out.printf&#40;&quot;Creation Time: %s, Size: %d%n&quot;, response2.getValue&#40;&#41;.getCreationTime&#40;&#41;,
 *     response2.getValue&#40;&#41;.getFileSize&#40;&#41;&#41;;
 * </pre>
 * <!-- end com.azure.storage.file.datalake.DataLakePathClient.getPropertiesWithResponse#PathGetPropertiesOptions-Duration-Context -->
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-blob-properties">Azure Docs</a></p>
 *
 * @param options {@link PathGetPropertiesOptions}. May be {@code null}, in which case no request conditions are
 * applied and the x-ms-upn header is not set.
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing the resource properties and metadata.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<PathProperties> getPropertiesWithResponse(PathGetPropertiesOptions options, Duration timeout,
    Context context) {
    // Attach the x-ms-upn request header when the caller asked for user principal names; the
    // supplier tolerates a null options object.
    context = BuilderHelper.addUpnHeader(() -> (options == null) ? null : options.isUpn(), context);
    Context finalContext = context;

    return DataLakeImplUtils.returnOrConvertException(() -> {
        // Guard against null options here too — the UPN supplier above already treats options as
        // optional, so request conditions must not NPE when options is absent.
        Response<BlobProperties> response = blockBlobClient.getPropertiesWithResponse(
            Transforms.toBlobRequestConditions(options == null ? null : options.getRequestConditions()),
            timeout, finalContext);
        // Forward the raw response so headers such as x-ms-acl can be mapped onto PathProperties.
        return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue(), response));
    }, LOGGER);
}

/**
* Gets if the path this client represents exists in the cloud.
* <p>Note that this method does not guarantee that the path type (file/directory) matches expectations.</p>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,7 @@
import com.azure.storage.file.datalake.models.LeaseStateType;
import com.azure.storage.file.datalake.models.LeaseStatusType;
import com.azure.storage.file.datalake.models.ListFileSystemsOptions;
import com.azure.storage.file.datalake.models.PathAccessControlEntry;
import com.azure.storage.file.datalake.models.PathDeletedItem;
import com.azure.storage.file.datalake.models.PathHttpHeaders;
import com.azure.storage.file.datalake.models.PathItem;
Expand Down Expand Up @@ -126,6 +127,7 @@ class Transforms {
public static final HttpHeaderName X_MS_GROUP = HttpHeaderName.fromString("x-ms-group");
public static final HttpHeaderName X_MS_PERMISSIONS = HttpHeaderName.fromString("x-ms-permissions");
public static final HttpHeaderName X_MS_CONTINUATION = HttpHeaderName.fromString("x-ms-continuation");
public static final HttpHeaderName X_MS_ACL = HttpHeaderName.fromString("x-ms-acl");

static {
// https://docs.oracle.com/javase/8/docs/api/java/util/Date.html#getTime--
Expand Down Expand Up @@ -338,9 +340,10 @@ static PathProperties toPathProperties(BlobProperties properties, Response<?> r)
String owner = r.getHeaders().getValue(X_MS_OWNER);
String group = r.getHeaders().getValue(X_MS_GROUP);
String permissions = r.getHeaders().getValue(X_MS_PERMISSIONS);
String acl = r.getHeaders().getValue(X_MS_ACL);

return AccessorUtility.getPathPropertiesAccessor().setPathProperties(pathProperties,
properties.getEncryptionScope(), encryptionContext, owner, group, permissions);
properties.getEncryptionScope(), encryptionContext, owner, group, permissions, acl);
}
}
}
Expand Down Expand Up @@ -452,10 +455,11 @@ static FileReadAsyncResponse toFileReadAsyncResponse(BlobDownloadAsyncResponse r
return null;
}
return new FileReadAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), r.getValue(),
Transforms.toPathReadHeaders(r.getDeserializedHeaders(), r.getHeaders().getValue(X_MS_ENCRYPTION_CONTEXT)));
Transforms.toPathReadHeaders(r.getDeserializedHeaders(), r.getHeaders().getValue(X_MS_ENCRYPTION_CONTEXT),
r.getHeaders().getValue(X_MS_ACL)));
}

private static FileReadHeaders toPathReadHeaders(BlobDownloadHeaders h, String encryptionContext) {
private static FileReadHeaders toPathReadHeaders(BlobDownloadHeaders h, String encryptionContext, String acl) {
if (h == null) {
return null;
}
Expand Down Expand Up @@ -491,7 +495,8 @@ private static FileReadHeaders toPathReadHeaders(BlobDownloadHeaders h, String e
.setContentCrc64(h.getContentCrc64())
.setErrorCode(h.getErrorCode())
.setCreationTime(h.getCreationTime())
.setEncryptionContext(encryptionContext);
.setEncryptionContext(encryptionContext)
.setAccessControlList(PathAccessControlEntry.parseList(acl));
}

static List<BlobSignedIdentifier> toBlobIdentifierList(List<DataLakeSignedIdentifier> identifiers) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ private AccessorUtility() {
*/
public interface PathPropertiesAccessor {
    // Populates package-internal fields on a PathProperties instance (encryption scope/context,
    // POSIX owner/group/permissions, and the raw x-ms-acl header value) without widening the
    // public PathProperties API. Implemented by the model class and invoked from Transforms.
    PathProperties setPathProperties(PathProperties properties, String encryptionScope, String encryptionContext,
        String owner, String group, String permissions, String acl);
}

/**
Expand Down
Loading

0 comments on commit de9c095

Please sign in to comment.