From a4293d730c6f7a602de678221901e22516141f3e Mon Sep 17 00:00:00 2001
From: Alan Zimmer <48699787+alzimmermsft@users.noreply.github.com>
Date: Mon, 15 Nov 2021 16:33:53 -0800
Subject: [PATCH] Transition Storage SDKs to New Codesnippet Tooling (#25346)

Transition Storage SDKs to New Codesnippet Tooling
---
 .../azure/storage/blob/batch/BlobBatch.java   |  74 +-
 .../blob/batch/BlobBatchAsyncClient.java      |  60 +-
 .../storage/blob/batch/BlobBatchClient.java   | 106 ++-
 .../blob/batch/BlobBatchClientBuilder.java    |  12 +-
 .../storage/blob/batch/ReadmeSamples.java     |  34 +-
 .../changefeed/BlobChangefeedAsyncClient.java |  26 +-
 .../blob/changefeed/BlobChangefeedClient.java |  45 +-
 .../BlobChangefeedClientBuilder.java          |  12 +-
 .../blob/changefeed/ReadmeSamples.java        |  14 +-
 .../EncryptedBlobAsyncClient.java             | 130 ++-
 .../cryptography/EncryptedBlobClient.java     |  74 +-
 .../EncryptedBlobClientBuilder.java           |  20 +-
 .../cryptography/ReadmeSamples.java           |   8 +
 sdk/storage/azure-storage-blob-nio/README.md  |   1 -
 .../azure/storage/blob/nio/ReadmeSamples.java |  34 +-
 .../azure/storage/blob/BlobAsyncClient.java   | 195 ++++-
 .../com/azure/storage/blob/BlobClient.java    |  74 +-
 .../azure/storage/blob/BlobClientBuilder.java |  16 +-
 .../blob/BlobContainerAsyncClient.java        | 309 +++++++-
 .../storage/blob/BlobContainerClient.java     | 335 +++++++-
 .../blob/BlobContainerClientBuilder.java      |  16 +-
 .../storage/blob/BlobServiceAsyncClient.java  | 242 +++++-
 .../azure/storage/blob/BlobServiceClient.java | 240 +++++-
 .../specialized/AppendBlobAsyncClient.java    | 119 ++-
 .../blob/specialized/AppendBlobClient.java    | 132 +++-
 .../blob/specialized/BlobAsyncClientBase.java | 586 ++++++++++++--
 .../blob/specialized/BlobClientBase.java      | 572 ++++++++++++--
 .../specialized/BlobLeaseAsyncClient.java     | 165 +++-
 .../blob/specialized/BlobLeaseClient.java     | 175 +++-
 .../specialized/BlobLeaseClientBuilder.java   |  36 +-
 .../specialized/BlockBlobAsyncClient.java     | 240 +++++-
 .../blob/specialized/BlockBlobClient.java     | 248 +++++-
 .../blob/specialized/PageBlobAsyncClient.java | 352 ++++++++-
 .../blob/specialized/PageBlobClient.java      | 369 ++++++++-
 .../SpecializedBlobClientBuilder.java         |   9 +-
 .../com/azure/storage/blob/ReadmeSamples.java | 253 +++---
 .../common/StorageSharedKeyCredential.java    |   6 +-
 .../credentials/SasTokenCredential.java       |   4 -
 .../DataLakeDirectoryAsyncClient.java         | 154 +++-
 .../datalake/DataLakeDirectoryClient.java     | 154 +++-
 .../datalake/DataLakeFileAsyncClient.java     | 352 ++++++++-
 .../file/datalake/DataLakeFileClient.java     | 321 +++++++-
 .../DataLakeFileSystemAsyncClient.java        | 355 ++++++++-
 .../datalake/DataLakeFileSystemClient.java    | 387 ++++++++-
 .../DataLakeFileSystemClientBuilder.java      |  18 +-
 .../datalake/DataLakePathAsyncClient.java     | 353 ++++++++-
 .../file/datalake/DataLakePathClient.java     | 370 ++++++++-
 .../datalake/DataLakePathClientBuilder.java   |  36 +-
 .../datalake/DataLakeServiceAsyncClient.java  | 199 ++++-
 .../file/datalake/DataLakeServiceClient.java  | 214 ++++-
 .../specialized/DataLakeLeaseAsyncClient.java | 106 ++-
 .../specialized/DataLakeLeaseClient.java      | 111 ++-
 .../DataLakeLeaseClientBuilder.java           |  54 +-
 .../storage/file/datalake/ReadmeSamples.java  |  48 +-
 .../storage/file/share/ShareAsyncClient.java  | 519 ++++++++++--
 .../azure/storage/file/share/ShareClient.java | 452 +++++++++--
 .../file/share/ShareClientBuilder.java        |  58 +-
 .../file/share/ShareDirectoryAsyncClient.java | 389 ++++++++-
 .../file/share/ShareDirectoryClient.java      | 349 +++++++-
 .../file/share/ShareFileAsyncClient.java      | 747 ++++++++++++++++--
 .../storage/file/share/ShareFileClient.java   | 703 ++++++++++++++--
 .../file/share/ShareFileClientBuilder.java    |  64 +-
 .../file/share/ShareServiceAsyncClient.java   | 221 +++++-
 .../file/share/ShareServiceClient.java        | 200 ++++-
 .../file/share/ShareServiceClientBuilder.java |  54 +-
 .../specialized/ShareLeaseAsyncClient.java    |  88 ++-
 .../share/specialized/ShareLeaseClient.java   |  95 ++-
 .../specialized/ShareLeaseClientBuilder.java  |  36 +-
 .../storage/file/share/ReadmeSamples.java     | 163 ++--
 .../azure/storage/queue/QueueAsyncClient.java | 374 ++++++++-
 .../com/azure/storage/queue/QueueClient.java  | 291 ++++++-
 .../storage/queue/QueueClientBuilder.java     | 119 ++-
 .../queue/QueueServiceAsyncClient.java        | 182 ++++-
 .../storage/queue/QueueServiceClient.java     | 174 +++-
 .../queue/QueueServiceClientBuilder.java      | 117 ++-
 .../azure/storage/queue/ReadmeSamples.java    | 109 ++-
 76 files changed, 12634 insertions(+), 1445 deletions(-)

diff --git a/sdk/storage/azure-storage-blob-batch/src/main/java/com/azure/storage/blob/batch/BlobBatch.java b/sdk/storage/azure-storage-blob-batch/src/main/java/com/azure/storage/blob/batch/BlobBatch.java
index b32a9682d3db7..b659c5f79a7d1 100644
--- a/sdk/storage/azure-storage-blob-batch/src/main/java/com/azure/storage/blob/batch/BlobBatch.java
+++ b/sdk/storage/azure-storage-blob-batch/src/main/java/com/azure/storage/blob/batch/BlobBatch.java
@@ -45,7 +45,17 @@
 *

Azure Storage Blob batches are homogeneous which means a {@link #deleteBlob(String) delete} and {@link #setBlobAccessTier(String, AccessTier) set tier} are not allowed to be in the same batch.

 *
- * {@codesnippet com.azure.storage.blob.batch.BlobBatch.illegalBatchOperation}
+ * <!-- src_embed com.azure.storage.blob.batch.BlobBatch.illegalBatchOperation -->
+ * <pre>
+ * try {
+ *     Response<Void> deleteResponse = batch.deleteBlob("{url of blob}");
+ *     Response<Void> setTierResponse = batch.setBlobAccessTier("{url of another blob}", AccessTier.HOT);
+ * } catch (UnsupportedOperationException ex) {
+ *     System.err.printf("This will fail as Azure Storage Blob batch operations are homogeneous. Exception: %s%n",
+ *         ex.getMessage());
+ * }
+ * </pre>
+ * <!-- end com.azure.storage.blob.batch.BlobBatch.illegalBatchOperation -->
 *
 *

Please refer to the Azure Docs for more information.

@@ -109,7 +119,11 @@ public final class BlobBatch { * *

Code sample

* - * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob#String-String} + * + *
+     * Response<Void> deleteResponse = batch.deleteBlob("{container name}", "{blob name}");
+     * 
+ * * * @param containerName The container of the blob. * @param blobName The name of the blob. @@ -127,7 +141,14 @@ public Response deleteBlob(String containerName, String blobName) { * *

Code sample

* - * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob#String-String-DeleteSnapshotsOptionType-BlobRequestConditions} + * + *
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId("{lease ID}");
+     *
+     * Response<Void> deleteResponse = batch.deleteBlob("{container name}", "{blob name}",
+     *     DeleteSnapshotsOptionType.INCLUDE, blobRequestConditions);
+     * 
+ * * * @param containerName The container of the blob. * @param blobName The name of the blob. @@ -148,7 +169,11 @@ public Response deleteBlob(String containerName, String blobName, * *

Code sample

* - * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob#String} + * + *
+     * Response<Void> deleteResponse = batch.deleteBlob("{url of blob}");
+     * 
+ * * * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8. * @return a {@link Response} that will be used to associate this operation to the response when the batch is @@ -164,7 +189,14 @@ public Response deleteBlob(String blobUrl) { * *

Code sample

* - * {@codesnippet com.azure.storage.blob.batch.BlobBatch.deleteBlob#String-DeleteSnapshotsOptionType-BlobRequestConditions} + * + *
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId("{lease ID}");
+     *
+     * Response<Void> deleteResponse = batch.deleteBlob("{url of blob}", DeleteSnapshotsOptionType.INCLUDE,
+     *     blobRequestConditions);
+     * 
+ * * * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8. * @param deleteOptions Delete options for the blob and its snapshots. @@ -190,7 +222,11 @@ private Response deleteBlobHelper(String urlPath, DeleteSnapshotsOptionTyp * *

Code sample

* - * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier#String-String-AccessTier} + * + *
+     * Response<Void> setTierResponse = batch.setBlobAccessTier("{container name}", "{blob name}", AccessTier.HOT);
+     * 
+ * * * @param containerName The container of the blob. * @param blobName The name of the blob. @@ -209,7 +245,12 @@ public Response setBlobAccessTier(String containerName, String blobName, A * *

Code sample

* - * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier#String-String-AccessTier-String} + * + *
+     * Response<Void> setTierResponse = batch.setBlobAccessTier("{container name}", "{blob name}", AccessTier.HOT,
+     *     "{lease ID}");
+     * 
+ * * * @param containerName The container of the blob. * @param blobName The name of the blob. @@ -230,7 +271,11 @@ public Response setBlobAccessTier(String containerName, String blobName, A * *

Code sample

* - * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier#String-AccessTier} + * + *
+     * Response<Void> setTierResponse = batch.setBlobAccessTier("{url of blob}", AccessTier.HOT);
+     * 
+ * * * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8. * @param accessTier The tier to set on the blob. @@ -247,7 +292,11 @@ public Response setBlobAccessTier(String blobUrl, AccessTier accessTier) { * *

Code sample

* - * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier#String-AccessTier-String} + * + *
+     * Response<Void> setTierResponse = batch.setBlobAccessTier("{url of blob}", AccessTier.HOT, "{lease ID}");
+     * 
+ * * * @param blobUrl URL of the blob. Blob name must be encoded to UTF-8. * @param accessTier The tier to set on the blob. @@ -265,7 +314,12 @@ public Response setBlobAccessTier(String blobUrl, AccessTier accessTier, S * *

Code sample

* - * {@codesnippet com.azure.storage.blob.batch.BlobBatch.setBlobAccessTier#BlobBatchSetBlobAccessTierOptions} + * + *
+     * Response<Void> setTierResponse = batch.setBlobAccessTier(
+     *     new BlobBatchSetBlobAccessTierOptions("{url of blob}", AccessTier.HOT).setLeaseId("{lease ID}"));
+     * 
+ * * * @param options {@link BlobBatchSetBlobAccessTierOptions} * @return a {@link Response} that will be used to associate this operation to the response when the batch is diff --git a/sdk/storage/azure-storage-blob-batch/src/main/java/com/azure/storage/blob/batch/BlobBatchAsyncClient.java b/sdk/storage/azure-storage-blob-batch/src/main/java/com/azure/storage/blob/batch/BlobBatchAsyncClient.java index 4eb1a58594fd4..ae838eb9bd846 100644 --- a/sdk/storage/azure-storage-blob-batch/src/main/java/com/azure/storage/blob/batch/BlobBatchAsyncClient.java +++ b/sdk/storage/azure-storage-blob-batch/src/main/java/com/azure/storage/blob/batch/BlobBatchAsyncClient.java @@ -80,7 +80,21 @@ public BlobBatch getBlobBatch() { * *

Code samples

* - * {@codesnippet com.azure.storage.blob.batch.BlobBatchAsyncClient.submitBatch#BlobBatch} + * + *
+     * BlobBatch batch = batchAsyncClient.getBlobBatch();
+     *
+     * Response<Void> deleteResponse1 = batch.deleteBlob("container", "blob1");
+     * Response<Void> deleteResponse2 = batch.deleteBlob("container", "blob2", DeleteSnapshotsOptionType.INCLUDE,
+     *     new BlobRequestConditions().setLeaseId("leaseId"));
+     *
+     * batchAsyncClient.submitBatch(batch).subscribe(response -> {
+     *     System.out.println("Batch submission completed successfully.");
+     *     System.out.printf("Delete operation 1 completed with status code: %d%n", deleteResponse1.getStatusCode());
+     *     System.out.printf("Delete operation 2 completed with status code: %d%n", deleteResponse2.getStatusCode());
+     * }, error -> System.err.printf("Batch submission failed. Error message: %s%n", error.getMessage()));
+     * 
+ * * * @param batch Batch to submit. * @return An empty response indicating that the batch operation has completed. @@ -104,7 +118,21 @@ public Mono submitBatch(BlobBatch batch) { * *

Code samples

* - * {@codesnippet com.azure.storage.blob.batch.BlobBatchAsyncClient.submitBatch#BlobBatch-boolean} + * + *
+     * BlobBatch batch = batchAsyncClient.getBlobBatch();
+     *
+     * Response<Void> deleteResponse1 = batch.deleteBlob("container", "blob1");
+     * Response<Void> deleteResponse2 = batch.deleteBlob("container", "blob2", DeleteSnapshotsOptionType.INCLUDE,
+     *     new BlobRequestConditions().setLeaseId("leaseId"));
+     *
+     * batchAsyncClient.submitBatchWithResponse(batch, true).subscribe(response -> {
+     *     System.out.printf("Batch submission completed with status code: %d%n", response.getStatusCode());
+     *     System.out.printf("Delete operation 1 completed with status code: %d%n", deleteResponse1.getStatusCode());
+     *     System.out.printf("Delete operation 2 completed with status code: %d%n", deleteResponse2.getStatusCode());
+     * }, error -> System.err.printf("Batch submission failed. Error message: %s%n", error.getMessage()));
+     * 
+ * * * @param batch Batch to submit. * @param throwOnAnyFailure Flag to indicate if an exception should be thrown if any request in the batch fails. @@ -146,7 +174,19 @@ Mono> submitBatchWithResponse(BlobBatch batch, boolean throwOnAny * *

Code samples

* - * {@codesnippet com.azure.storage.blob.batch.BlobBatchAsyncClient.deleteBlobs#List-DeleteSnapshotsOptionType} + * + *
+     * List<String> blobUrls = new ArrayList<>();
+     * blobUrls.add(blobClient1.getBlobUrl());
+     * blobUrls.add(blobClient2.getBlobUrl());
+     * blobUrls.add(blobClient3.getBlobUrl());
+     *
+     * batchAsyncClient.deleteBlobs(blobUrls, DeleteSnapshotsOptionType.INCLUDE).subscribe(response ->
+     *         System.out.printf("Deleting blob with URL %s completed with status code %d%n",
+     *             response.getRequest().getUrl(), response.getStatusCode()),
+     *     error -> System.err.printf("Deleting blob failed with exception: %s%n", error.getMessage()));
+     * 
+ * * * @param blobUrls Urls of the blobs to delete. Blob names must be encoded to UTF-8. * @param deleteOptions The deletion option for all blobs. @@ -180,7 +220,19 @@ private Mono>> submitDeleteBlobsBatch(List * *

Code samples

* - * {@codesnippet com.azure.storage.blob.batch.BlobBatchAsyncClient.setBlobsAccessTier#List-AccessTier} + * + *
+     * List<String> blobUrls = new ArrayList<>();
+     * blobUrls.add(blobClient1.getBlobUrl());
+     * blobUrls.add(blobClient2.getBlobUrl());
+     * blobUrls.add(blobClient3.getBlobUrl());
+     *
+     * batchAsyncClient.setBlobsAccessTier(blobUrls, AccessTier.HOT).subscribe(response ->
+     *         System.out.printf("Setting blob access tier with URL %s completed with status code %d%n",
+     *             response.getRequest().getUrl(), response.getStatusCode()),
+     *     error -> System.err.printf("Setting blob access tier failed with exception: %s%n", error.getMessage()));
+     * 
+ * * * @param blobUrls Urls of the blobs to set their access tier. Blob names must be encoded to UTF-8. * @param accessTier {@link AccessTier} to set on each blob. diff --git a/sdk/storage/azure-storage-blob-batch/src/main/java/com/azure/storage/blob/batch/BlobBatchClient.java b/sdk/storage/azure-storage-blob-batch/src/main/java/com/azure/storage/blob/batch/BlobBatchClient.java index a5bb7e94f3ca0..1147f3420d02d 100644 --- a/sdk/storage/azure-storage-blob-batch/src/main/java/com/azure/storage/blob/batch/BlobBatchClient.java +++ b/sdk/storage/azure-storage-blob-batch/src/main/java/com/azure/storage/blob/batch/BlobBatchClient.java @@ -49,7 +49,24 @@ public BlobBatch getBlobBatch() { * *

Code samples

* - * {@codesnippet com.azure.storage.blob.batch.BlobBatchClient.submitBatch#BlobBatch} + * + *
+     * BlobBatch batch = batchClient.getBlobBatch();
+     *
+     * Response<Void> deleteResponse1 = batch.deleteBlob("container", "blob1");
+     * Response<Void> deleteResponse2 = batch.deleteBlob("container", "blob2", DeleteSnapshotsOptionType.INCLUDE,
+     *     new BlobRequestConditions().setLeaseId("leaseId"));
+     *
+     * try {
+     *     batchClient.submitBatch(batch);
+     *     System.out.println("Batch submission completed successfully.");
+     *     System.out.printf("Delete operation 1 completed with status code: %d%n", deleteResponse1.getStatusCode());
+     *     System.out.printf("Delete operation 2 completed with status code: %d%n", deleteResponse2.getStatusCode());
+     * } catch (BlobStorageException error) {
+     *     System.err.printf("Batch submission failed. Error message: %s%n", error.getMessage());
+     * }
+     * 
+ * * * @param batch Batch to submit. * @throws BlobStorageException If the batch request is malformed. @@ -68,7 +85,24 @@ public void submitBatch(BlobBatch batch) { * *

Code samples

* - * {@codesnippet com.azure.storage.blob.batch.BlobBatchClient.submitBatch#BlobBatch-boolean-Duration-Context} + * + *
+     * BlobBatch batch = batchClient.getBlobBatch();
+     *
+     * Response<Void> deleteResponse1 = batch.deleteBlob("container", "blob1");
+     * Response<Void> deleteResponse2 = batch.deleteBlob("container", "blob2", DeleteSnapshotsOptionType.INCLUDE,
+     *     new BlobRequestConditions().setLeaseId("leaseId"));
+     *
+     * try {
+     *     System.out.printf("Batch submission completed with status code: %d%n",
+     *         batchClient.submitBatchWithResponse(batch, true, timeout, Context.NONE).getStatusCode());
+     *     System.out.printf("Delete operation 1 completed with status code: %d%n", deleteResponse1.getStatusCode());
+     *     System.out.printf("Delete operation 2 completed with status code: %d%n", deleteResponse2.getStatusCode());
+     * } catch (BlobStorageException error) {
+     *     System.err.printf("Batch submission failed. Error message: %s%n", error.getMessage());
+     * }
+     * 
+ * * * @param batch Batch to submit. * @param throwOnAnyFailure Flag to indicate if an exception should be thrown if any request in the batch fails. @@ -93,7 +127,22 @@ public Response submitBatchWithResponse(BlobBatch batch, boolean throwOnAn * *

Code samples

* - * {@codesnippet com.azure.storage.blob.batch.BlobBatchClient.deleteBlobs#List-DeleteSnapshotsOptionType} + * + *
+     * List<String> blobUrls = new ArrayList<>();
+     * blobUrls.add(blobClient1.getBlobUrl());
+     * blobUrls.add(blobClient2.getBlobUrl());
+     * blobUrls.add(blobClient3.getBlobUrl());
+     *
+     * try {
+     *     batchClient.deleteBlobs(blobUrls, DeleteSnapshotsOptionType.INCLUDE).forEach(response ->
+     *         System.out.printf("Deleting blob with URL %s completed with status code %d%n",
+     *             response.getRequest().getUrl(), response.getStatusCode()));
+     * } catch (Throwable error) {
+     *     System.err.printf("Deleting blob failed with exception: %s%n", error.getMessage());
+     * }
+     * 
+ * * * @param blobUrls Urls of the blobs to delete. Blob names must be encoded to UTF-8. * @param deleteOptions The deletion option for all blobs. @@ -111,7 +160,22 @@ public PagedIterable> deleteBlobs(List blobUrls, DeleteSn * *

Code samples

* - * {@codesnippet com.azure.storage.blob.batch.BlobBatchClient.deleteBlobs#List-DeleteSnapshotsOptionType-Duration-Context} + * + *
+     * List<String> blobUrls = new ArrayList<>();
+     * blobUrls.add(blobClient1.getBlobUrl());
+     * blobUrls.add(blobClient2.getBlobUrl());
+     * blobUrls.add(blobClient3.getBlobUrl());
+     *
+     * try {
+     *     batchClient.deleteBlobs(blobUrls, DeleteSnapshotsOptionType.INCLUDE, timeout, Context.NONE)
+     *         .forEach(response -> System.out.printf("Deleting blob with URL %s completed with status code %d%n",
+     *             response.getRequest().getUrl(), response.getStatusCode()));
+     * } catch (Throwable error) {
+     *     System.err.printf("Deleting blob failed with exception: %s%n", error.getMessage());
+     * }
+     * 
+ * * * @param blobUrls Urls of the blobs to delete. Blob names must be encoded to UTF-8. * @param deleteOptions The deletion option for all blobs. @@ -133,7 +197,22 @@ public PagedIterable> deleteBlobs(List blobUrls, DeleteSn * *

Code samples

* - * {@codesnippet com.azure.storage.blob.batch.BlobBatchClient.setBlobsAccessTier#List-AccessTier} + * + *
+     * List<String> blobUrls = new ArrayList<>();
+     * blobUrls.add(blobClient1.getBlobUrl());
+     * blobUrls.add(blobClient2.getBlobUrl());
+     * blobUrls.add(blobClient3.getBlobUrl());
+     *
+     * try {
+     *     batchClient.setBlobsAccessTier(blobUrls, AccessTier.HOT).forEach(response ->
+     *         System.out.printf("Setting blob access tier with URL %s completed with status code %d%n",
+     *             response.getRequest().getUrl(), response.getStatusCode()));
+     * } catch (Throwable error) {
+     *     System.err.printf("Setting blob access tier failed with exception: %s%n", error.getMessage());
+     * }
+     * 
+ * * * @param blobUrls Urls of the blobs to set their access tier. Blob names must be encoded to UTF-8. * @param accessTier {@link AccessTier} to set on each blob. @@ -151,7 +230,22 @@ public PagedIterable> setBlobsAccessTier(List blobUrls, A * *

Code samples

* - * {@codesnippet com.azure.storage.blob.batch.BlobBatchClient.setBlobsAccessTier#List-AccessTier-Duration-Context} + * + *
+     * List<String> blobUrls = new ArrayList<>();
+     * blobUrls.add(blobClient1.getBlobUrl());
+     * blobUrls.add(blobClient2.getBlobUrl());
+     * blobUrls.add(blobClient3.getBlobUrl());
+     *
+     * try {
+     *     batchClient.setBlobsAccessTier(blobUrls, AccessTier.HOT, timeout, Context.NONE).forEach(response ->
+     *         System.out.printf("Setting blob access tier with URL %s completed with status code %d%n",
+     *             response.getRequest().getUrl(), response.getStatusCode()));
+     * } catch (Throwable error) {
+     *     System.err.printf("Setting blob access tier failed with exception: %s%n", error.getMessage());
+     * }
+     * 
+ * * * @param blobUrls Urls of the blobs to set their access tier. Blob names must be encoded to UTF-8. * @param accessTier {@link AccessTier} to set on each blob. diff --git a/sdk/storage/azure-storage-blob-batch/src/main/java/com/azure/storage/blob/batch/BlobBatchClientBuilder.java b/sdk/storage/azure-storage-blob-batch/src/main/java/com/azure/storage/blob/batch/BlobBatchClientBuilder.java index 9e10599c1bdc5..511cea147a754 100644 --- a/sdk/storage/azure-storage-blob-batch/src/main/java/com/azure/storage/blob/batch/BlobBatchClientBuilder.java +++ b/sdk/storage/azure-storage-blob-batch/src/main/java/com/azure/storage/blob/batch/BlobBatchClientBuilder.java @@ -99,7 +99,11 @@ public BlobBatchClientBuilder(BlobContainerAsyncClient client) { * *

Code sample

* - * {@codesnippet com.azure.storage.blob.batch.BlobBatchClientBuilder#buildClient} + * + *
+     * BlobBatchClient batchClient = new BlobBatchClientBuilder(blobServiceClient).buildClient();
+     * 
+ * * * @return a {@link BlobBatchClient} created from the configurations in this builder. */ @@ -112,7 +116,11 @@ public BlobBatchClient buildClient() { * *

Code sample

* - * {@codesnippet com.azure.storage.blob.batch.BlobBatchClientBuilder#buildAsyncClient} + * + *
+     * BlobBatchAsyncClient batchClient = new BlobBatchClientBuilder(blobServiceAsyncClient).buildAsyncClient();
+     * 
+ * * * @return a {@link BlobBatchAsyncClient} created from the configurations in this builder. */ diff --git a/sdk/storage/azure-storage-blob-batch/src/samples/java/com/azure/storage/blob/batch/ReadmeSamples.java b/sdk/storage/azure-storage-blob-batch/src/samples/java/com/azure/storage/blob/batch/ReadmeSamples.java index 847b3fc54e476..f88eee9345a85 100644 --- a/sdk/storage/azure-storage-blob-batch/src/samples/java/com/azure/storage/blob/batch/ReadmeSamples.java +++ b/sdk/storage/azure-storage-blob-batch/src/samples/java/com/azure/storage/blob/batch/ReadmeSamples.java @@ -22,14 +22,22 @@ * * Code samples for the README.md */ +@SuppressWarnings("unused") public class ReadmeSamples { - private BlobServiceClient blobServiceClient = new BlobServiceClientBuilder().endpoint("").sasToken("").buildClient(); - private BlobBatchClient blobBatchClient = new BlobBatchClientBuilder(blobServiceClient).buildClient(); - private String blobUrl = String.format("https://%s.blob.core.windows.net/containerName/blobName", ""); - private String blobUrl2 = String.format("https://%s.blob.core.windows.net/containerName/blobName2", ""); - private String blobUrlWithSnapshot = String.format("https://%s.blob.core.windows.net/containerName/blobName?snapshot=", ""); - private String blobUrlWithLease = String.format("https://%s.blob.core.windows.net/containerName/blobNameWithLease", ""); - private List blobUrls = Arrays.asList(blobUrl, blobUrl2, blobUrlWithSnapshot, blobUrlWithLease); + private final BlobServiceClient blobServiceClient = new BlobServiceClientBuilder() + .endpoint("") + .sasToken("") + .buildClient(); + private final BlobBatchClient blobBatchClient = new BlobBatchClientBuilder(blobServiceClient).buildClient(); + private final String blobUrl = String.format( + "https://%s.blob.core.windows.net/containerName/blobName", ""); + private final String blobUrl2 = String.format( + "https://%s.blob.core.windows.net/containerName/blobName2", ""); + private final String blobUrlWithSnapshot = String.format( + "https://%s.blob.core.windows.net/containerName/blobName?snapshot=", ""); + private final String blobUrlWithLease = String.format( + "https://%s.blob.core.windows.net/containerName/blobNameWithLease", ""); + private final List blobUrls = Arrays.asList(blobUrl, blobUrl2, blobUrlWithSnapshot, blobUrlWithLease); public void createHttpClient() { HttpClient client = new NettyAsyncHttpClientBuilder() @@ -39,22 +47,29 @@ public void createHttpClient() { } public void creatingBlobBatchClient() { + // BEGIN: readme-sample-creatingBlobBatchClient BlobBatchClient blobBatchClient = new BlobBatchClientBuilder(blobServiceClient).buildClient(); + // END: readme-sample-creatingBlobBatchClient } public void bulkDeletingBlobs() { + // BEGIN: readme-sample-bulkDeletingBlobs blobBatchClient.deleteBlobs(blobUrls, DeleteSnapshotsOptionType.INCLUDE).forEach(response -> System.out.printf("Deleting blob with URL %s completed with status code %d%n", response.getRequest().getUrl(), response.getStatusCode())); + // END: readme-sample-bulkDeletingBlobs } public void bulkSettingAccessTier() { + // BEGIN: readme-sample-bulkSettingAccessTier blobBatchClient.setBlobsAccessTier(blobUrls, AccessTier.HOT).forEach(response -> System.out.printf("Setting blob access tier with URL %s completed with status code %d%n", response.getRequest().getUrl(), response.getStatusCode())); + // END: readme-sample-bulkSettingAccessTier } public void advancedBatchingDelete() { + // BEGIN: readme-sample-advancedBatchingDelete BlobBatch blobBatch = 
blobBatchClient.getBlobBatch(); // Delete a blob. @@ -75,9 +90,11 @@ public void advancedBatchingDelete() { deleteSnapshotResponse.getStatusCode()); System.out.printf("Deleting blob with lease completed with status code %d%n", deleteWithLeaseResponse.getStatusCode()); + // END: readme-sample-advancedBatchingDelete } public void advancedBatchingSetTier() { + // BEGIN: readme-sample-advancedBatchingSetTier BlobBatch blobBatch = blobBatchClient.getBlobBatch(); // Set AccessTier on a blob. @@ -95,12 +112,15 @@ public void advancedBatchingSetTier() { System.out.printf("Set AccessTier on blob completed with status code %d%n", setTierResponse2.getStatusCode()); System.out.printf("Set AccessTier on blob with lease completed with status code %d%n", setTierWithLeaseResponse.getStatusCode()); + // END: readme-sample-advancedBatchingSetTier } public void deleteBlobWithLease() { + // BEGIN: readme-sample-deleteBlobWithLease BlobBatch blobBatch = blobBatchClient.getBlobBatch(); Response deleteWithLeaseResponse = blobBatch.deleteBlob(blobUrlWithLease, DeleteSnapshotsOptionType.INCLUDE, new BlobRequestConditions() .setLeaseId("leaseId")); + // END: readme-sample-deleteBlobWithLease } } diff --git a/sdk/storage/azure-storage-blob-changefeed/src/main/java/com/azure/storage/blob/changefeed/BlobChangefeedAsyncClient.java b/sdk/storage/azure-storage-blob-changefeed/src/main/java/com/azure/storage/blob/changefeed/BlobChangefeedAsyncClient.java index de256d8854200..772dbf219abc9 100644 --- a/sdk/storage/azure-storage-blob-changefeed/src/main/java/com/azure/storage/blob/changefeed/BlobChangefeedAsyncClient.java +++ b/sdk/storage/azure-storage-blob-changefeed/src/main/java/com/azure/storage/blob/changefeed/BlobChangefeedAsyncClient.java @@ -40,7 +40,12 @@ public class BlobChangefeedAsyncClient { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.changefeed.BlobChangefeedAsyncClient.getEvents} + * + *
+     * client.getEvents().subscribe(event ->
+     *     System.out.printf("Topic: %s, Subject: %s%n", event.getTopic(), event.getSubject()));
+     * 
+ * * * @return A reactive response emitting the changefeed events. */ @@ -60,7 +65,15 @@ public BlobChangefeedPagedFlux getEvents() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.changefeed.BlobChangefeedAsyncClient.getEvents#OffsetDateTime-OffsetDateTime} + * + *
+     * OffsetDateTime startTime = OffsetDateTime.MIN;
+     * OffsetDateTime endTime = OffsetDateTime.now();
+     *
+     * client.getEvents(startTime, endTime).subscribe(event ->
+     *     System.out.printf("Topic: %s, Subject: %s%n", event.getTopic(), event.getSubject()));
+     * 
+ * * * @param startTime Filters the results to return events approximately after the start time. Note: A few events * belonging to the previous hour can also be returned. A few events belonging to this hour can be missing; to @@ -86,7 +99,14 @@ public BlobChangefeedPagedFlux getEvents(OffsetDateTime startTime, OffsetDateTim * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.changefeed.BlobChangefeedAsyncClient.getEvents#String} + * + *
+     * String cursor = "cursor";
+     *
+     * client.getEvents(cursor).subscribe(event ->
+     *     System.out.printf("Topic: %s, Subject: %s%n", event.getTopic(), event.getSubject()));
+     * 
+ * * * @param cursor Identifies the portion of the events to be returned with the next get operation. Events that * take place after the event identified by the cursor will be returned. diff --git a/sdk/storage/azure-storage-blob-changefeed/src/main/java/com/azure/storage/blob/changefeed/BlobChangefeedClient.java b/sdk/storage/azure-storage-blob-changefeed/src/main/java/com/azure/storage/blob/changefeed/BlobChangefeedClient.java index 9e5042b76490b..03ef27fa92e64 100644 --- a/sdk/storage/azure-storage-blob-changefeed/src/main/java/com/azure/storage/blob/changefeed/BlobChangefeedClient.java +++ b/sdk/storage/azure-storage-blob-changefeed/src/main/java/com/azure/storage/blob/changefeed/BlobChangefeedClient.java @@ -42,7 +42,12 @@ public class BlobChangefeedClient { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.changefeed.BlobChangefeedClient.getEvents} + * + *
+     * client.getEvents().forEach(event ->
+     *     System.out.printf("Topic: %s, Subject: %s%n", event.getTopic(), event.getSubject()));
+     * 
+ * * * @return The changefeed events. */ @@ -64,7 +69,15 @@ public BlobChangefeedPagedIterable getEvents() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.changefeed.BlobChangefeedClient.getEvents#OffsetDateTime-OffsetDateTime} + * + *
+     * OffsetDateTime startTime = OffsetDateTime.MIN;
+     * OffsetDateTime endTime = OffsetDateTime.now();
+     *
+     * client.getEvents(startTime, endTime).forEach(event ->
+     *     System.out.printf("Topic: %s, Subject: %s%n", event.getTopic(), event.getSubject()));
+     * 
+ * * * @param startTime Filters the results to return events approximately after the start time. Note: A few events * belonging to the previous hour can also be returned. A few events belonging to this hour can be missing; to @@ -92,7 +105,15 @@ public BlobChangefeedPagedIterable getEvents(OffsetDateTime startTime, OffsetDat * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.changefeed.BlobChangefeedClient.getEvents#OffsetDateTime-OffsetDateTime-Context} + * + *
+     * OffsetDateTime startTime = OffsetDateTime.MIN;
+     * OffsetDateTime endTime = OffsetDateTime.now();
+     *
+     * client.getEvents(startTime, endTime, new Context("key", "value")).forEach(event ->
+     *     System.out.printf("Topic: %s, Subject: %s%n", event.getTopic(), event.getSubject()));
+     * 
+ * * * @param startTime Filters the results to return events approximately after the start time. Note: A few events * belonging to the previous hour can also be returned. A few events belonging to this hour can be missing; to @@ -121,7 +142,14 @@ public BlobChangefeedPagedIterable getEvents(OffsetDateTime startTime, OffsetDat * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.changefeed.BlobChangefeedClient.getEvents#String} + * + *
+     * String cursor = "cursor";
+     *
+     * client.getEvents(cursor).forEach(event ->
+     *     System.out.printf("Topic: %s, Subject: %s%n", event.getTopic(), event.getSubject()));
+     * 
+ * * * @param cursor Identifies the portion of the events to be returned with the next get operation. Events that * take place after the event identified by the cursor will be returned. @@ -145,7 +173,14 @@ public BlobChangefeedPagedIterable getEvents(String cursor) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.changefeed.BlobChangefeedClient.getEvents#String-Context} + * + *
+     * String cursor = "cursor";
+     *
+     * client.getEvents(cursor, new Context("key", "value")).forEach(event ->
+     *     System.out.printf("Topic: %s, Subject: %s%n", event.getTopic(), event.getSubject()));
+     * 
+ * * * @param cursor Identifies the portion of the events to be returned with the next get operation. Events that * take place after the event identified by the cursor will be returned. diff --git a/sdk/storage/azure-storage-blob-changefeed/src/main/java/com/azure/storage/blob/changefeed/BlobChangefeedClientBuilder.java b/sdk/storage/azure-storage-blob-changefeed/src/main/java/com/azure/storage/blob/changefeed/BlobChangefeedClientBuilder.java index dc6bd2913e977..356203bdb9179 100644 --- a/sdk/storage/azure-storage-blob-changefeed/src/main/java/com/azure/storage/blob/changefeed/BlobChangefeedClientBuilder.java +++ b/sdk/storage/azure-storage-blob-changefeed/src/main/java/com/azure/storage/blob/changefeed/BlobChangefeedClientBuilder.java @@ -70,7 +70,11 @@ public BlobChangefeedClientBuilder(BlobServiceAsyncClient client) { * *

Code sample

* - * {@codesnippet com.azure.storage.blob.changefeed.BlobChangefeedClientBuilder#buildClient} + * + *
+     * BlobChangefeedClient changefeedClient = new BlobChangefeedClientBuilder(blobServiceClient).buildClient();
+     * 
+ * *F * @return a {@link BlobChangefeedClient} created from the configurations in this builder. */ @@ -83,7 +87,11 @@ public BlobChangefeedClient buildClient() { * *

Code sample

* - * {@codesnippet com.azure.storage.blob.changefeed.BlobChangefeedClientBuilder#buildAsyncClient} + * + *
+     * BlobChangefeedAsyncClient changefeedClient = new BlobChangefeedClientBuilder(blobServiceAsyncClient).buildAsyncClient();
+     * 
+ * * * @return a {@link BlobChangefeedAsyncClient} created from the configurations in this builder. */ diff --git a/sdk/storage/azure-storage-blob-changefeed/src/samples/java/com/azure/storage/blob/changefeed/ReadmeSamples.java b/sdk/storage/azure-storage-blob-changefeed/src/samples/java/com/azure/storage/blob/changefeed/ReadmeSamples.java index 8339e044f1ca5..0f321bbed9afe 100644 --- a/sdk/storage/azure-storage-blob-changefeed/src/samples/java/com/azure/storage/blob/changefeed/ReadmeSamples.java +++ b/sdk/storage/azure-storage-blob-changefeed/src/samples/java/com/azure/storage/blob/changefeed/ReadmeSamples.java @@ -19,27 +19,34 @@ */ public class ReadmeSamples { - private BlobServiceClient blobServiceClient = new BlobServiceClientBuilder().buildClient(); + private final BlobServiceClient blobServiceClient = new BlobServiceClientBuilder().buildClient(); private BlobChangefeedClient client = new BlobChangefeedClientBuilder(blobServiceClient).buildClient(); public void getClient() { + // BEGIN: readme-sample-getClient client = new BlobChangefeedClientBuilder(blobServiceClient).buildClient(); + // END: readme-sample-getClient } public void changefeed() { + // BEGIN: readme-sample-changefeed client.getEvents().forEach(event -> System.out.printf("Topic: %s, Subject: %s%n", event.getTopic(), event.getSubject())); + // END: readme-sample-changefeed } public void changefeedBetweenDates() { + // BEGIN: readme-sample-changefeedBetweenDates OffsetDateTime startTime = OffsetDateTime.MIN; OffsetDateTime endTime = OffsetDateTime.now(); client.getEvents(startTime, endTime).forEach(event -> System.out.printf("Topic: %s, Subject: %s%n", event.getTopic(), event.getSubject())); + // END: readme-sample-changefeedBetweenDates } public void changefeedResumeWithCursor() { + // BEGIN: readme-sample-changefeedResumeWithCursor BlobChangefeedPagedIterable iterable = client.getEvents(); Iterable pages = iterable.iterableByPage(); @@ -57,10 +64,12 @@ public void changefeedResumeWithCursor() { /* Resume iterating from the pervious position with the cursor. */ client.getEvents(cursor).forEach(event -> System.out.printf("Topic: %s, Subject: %s%n", event.getTopic(), event.getSubject())); + // END: readme-sample-changefeedResumeWithCursor } public void changefeedPollForEventsWithCursor() { - List changefeedEvents = new ArrayList(); + // BEGIN: readme-sample-changefeedPollForEventsWithCursor + List changefeedEvents = new ArrayList<>(); /* Get the start time. The change feed client will round start time down to the nearest hour if you provide an OffsetDateTime with minutes and seconds. */ @@ -94,6 +103,7 @@ public void changefeedPollForEventsWithCursor() { /* Resume from last continuation token and fetch latest set of events. 
*/ pages = client.getEvents(continuationToken).iterableByPage(); } + // END: readme-sample-changefeedPollForEventsWithCursor } } diff --git a/sdk/storage/azure-storage-blob-cryptography/src/main/java/com/azure/storage/blob/specialized/cryptography/EncryptedBlobAsyncClient.java b/sdk/storage/azure-storage-blob-cryptography/src/main/java/com/azure/storage/blob/specialized/cryptography/EncryptedBlobAsyncClient.java index ede5052d214bc..24c09c139a815 100644 --- a/sdk/storage/azure-storage-blob-cryptography/src/main/java/com/azure/storage/blob/specialized/cryptography/EncryptedBlobAsyncClient.java +++ b/sdk/storage/azure-storage-blob-cryptography/src/main/java/com/azure/storage/blob/specialized/cryptography/EncryptedBlobAsyncClient.java @@ -179,7 +179,16 @@ public EncryptedBlobAsyncClient getCustomerProvidedKeyAsyncClient(CustomerProvid * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload#Flux-ParallelTransferOptions} + * + *
+     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
+     *     .setBlockSizeLong(blockSize)
+     *     .setMaxConcurrency(maxConcurrency);
+     * client.upload(data, parallelTransferOptions).subscribe(response ->
+     *     System.out.printf("Uploaded BlockBlob MD5 is %s%n",
+     *         Base64.getEncoder().encodeToString(response.getContentMd5())));
+     * 
+ * * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected @@ -221,7 +230,17 @@ public Mono upload(Flux data, ParallelTransferOptions * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload#Flux-ParallelTransferOptions-boolean} + * + *
+     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
+     *     .setBlockSizeLong(blockSize)
+     *     .setMaxConcurrency(maxConcurrency);
+     * boolean overwrite = false; // Default behavior
+     * client.upload(data, parallelTransferOptions, overwrite).subscribe(response ->
+     *     System.out.printf("Uploaded BlockBlob MD5 is %s%n",
+     *         Base64.getEncoder().encodeToString(response.getContentMd5())));
+     * 
+ * * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected @@ -273,7 +292,26 @@ public Mono upload(Flux data, ParallelTransferOptions * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse#Flux-ParallelTransferOptions-BlobHttpHeaders-Map-AccessTier-BlobRequestConditions} + * + *
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = new HashMap<>(Collections.singletonMap("metadata", "value"));
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
+     *     .setBlockSizeLong(blockSize)
+     *     .setMaxConcurrency(maxConcurrency);
+     *
+     * client.uploadWithResponse(data, parallelTransferOptions, headers, metadata, AccessTier.HOT, requestConditions)
+     *     .subscribe(response -> System.out.printf("Uploaded BlockBlob MD5 is %s%n",
+     *         Base64.getEncoder().encodeToString(response.getValue().getContentMd5())));
+     * 
+ * * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected @@ -319,7 +357,29 @@ public Mono> uploadWithResponse(Flux data, * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse#BlobParallelUploadOptions} + * + *
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = new HashMap<>(Collections.singletonMap("metadata", "value"));
+     * Map<String, String> tags = new HashMap<>(Collections.singletonMap("tag", "value"));
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
+     *     .setBlockSizeLong(blockSize)
+     *     .setMaxConcurrency(maxConcurrency);
+     *
+     * client.uploadWithResponse(new BlobParallelUploadOptions(data)
+     *     .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata)
+     *     .setTags(tags).setTier(AccessTier.HOT).setRequestConditions(requestConditions))
+     *     .subscribe(response -> System.out.printf("Uploaded BlockBlob MD5 is %s%n",
+     *         Base64.getEncoder().encodeToString(response.getValue().getContentMd5())));
+     * 
+ * * * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. @@ -355,7 +415,13 @@ public Mono> uploadWithResponse(BlobParallelUploadOption * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile#String} + * + *
+     * client.uploadFromFile(filePath)
+     *     .doOnError(throwable -> System.err.printf("Failed to upload from file %s%n", throwable.getMessage()))
+     *     .subscribe(completion -> System.out.println("Upload from file succeeded"));
+     * 
+ * * * @param filePath Path to the upload file * @return An empty response @@ -376,7 +442,14 @@ public Mono uploadFromFile(String filePath) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile#String-boolean} + * + *
+     * boolean overwrite = false; // Default behavior
+     * client.uploadFromFile(filePath, overwrite)
+     *     .doOnError(throwable -> System.err.printf("Failed to upload from file %s%n", throwable.getMessage()))
+     *     .subscribe(completion -> System.out.println("Upload from file succeeded"));
+     * 
+ * * * @param filePath Path to the upload file * @param overwrite Whether to overwrite should the blob exist. @@ -406,7 +479,26 @@ public Mono uploadFromFile(String filePath, boolean overwrite) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile#String-ParallelTransferOptions-BlobHttpHeaders-Map-AccessTier-BlobRequestConditions} + * + *
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = new HashMap<>(Collections.singletonMap("metadata", "value"));
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
+     *     .setBlockSizeLong(blockSize);
+     *
+     * client.uploadFromFile(filePath, parallelTransferOptions, headers, metadata, AccessTier.HOT, requestConditions)
+     *     .doOnError(throwable -> System.err.printf("Failed to upload from file %s%n", throwable.getMessage()))
+     *     .subscribe(completion -> System.out.println("Upload from file succeeded"));
+     * 
+ * * * @param filePath Path to the upload file * @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file. @@ -436,7 +528,29 @@ public Mono uploadFromFile(String filePath, ParallelTransferOptions parall * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFileWithResponse#BlobUploadFromFileOptions} + * + *
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = new HashMap<>(Collections.singletonMap("metadata", "value"));
+     * Map<String, String> tags = new HashMap<>(Collections.singletonMap("tag", "value"));
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
+     *     .setBlockSizeLong(blockSize);
+     *
+     * client.uploadFromFileWithResponse(new BlobUploadFromFileOptions(filePath)
+     *     .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata).setTags(tags)
+     *     .setTier(AccessTier.HOT).setRequestConditions(requestConditions))
+     *     .doOnError(throwable -> System.err.printf("Failed to upload from file %s%n", throwable.getMessage()))
+     *     .subscribe(completion -> System.out.println("Upload from file succeeded"));
+     * 
+ * * * @param options {@link BlobUploadFromFileOptions} * @return A reactive response containing the information of the uploaded block blob. diff --git a/sdk/storage/azure-storage-blob-cryptography/src/main/java/com/azure/storage/blob/specialized/cryptography/EncryptedBlobClient.java b/sdk/storage/azure-storage-blob-cryptography/src/main/java/com/azure/storage/blob/specialized/cryptography/EncryptedBlobClient.java index 3fcda9b9d04f8..f43a046ee714b 100644 --- a/sdk/storage/azure-storage-blob-cryptography/src/main/java/com/azure/storage/blob/specialized/cryptography/EncryptedBlobClient.java +++ b/sdk/storage/azure-storage-blob-cryptography/src/main/java/com/azure/storage/blob/specialized/cryptography/EncryptedBlobClient.java @@ -182,7 +182,16 @@ public BlobOutputStream getBlobOutputStream(BlockBlobOutputStreamOptions options * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClient.uploadFromFile#String} + * + *
+     * try {
+     *     client.uploadFromFile(filePath);
+     *     System.out.println("Upload from file succeeded");
+     * } catch (UncheckedIOException ex) {
+     *     System.err.printf("Failed to upload from file %s%n", ex.getMessage());
+     * }
+     * 
+ * * * @param filePath Path of the file to upload */ @@ -197,7 +206,17 @@ public void uploadFromFile(String filePath) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClient.uploadFromFile#String-boolean} + * + *
+     * try {
+     *     boolean overwrite = false; // Default value
+     *     client.uploadFromFile(filePath, overwrite);
+     *     System.out.println("Upload from file succeeded");
+     * } catch (UncheckedIOException ex) {
+     *     System.err.printf("Failed to upload from file %s%n", ex.getMessage());
+     * }
+     * 
+ * * * @param filePath Path of the file to upload * @param overwrite Whether or not to overwrite should data already exist on the blob @@ -216,7 +235,29 @@ public void uploadFromFile(String filePath, boolean overwrite) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClient.uploadFromFile#String-ParallelTransferOptions-BlobHttpHeaders-Map-AccessTier-BlobRequestConditions-Duration} + * + *
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = new HashMap<>(Collections.singletonMap("metadata", "value"));
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * long blockSize = 100 * 1024 * 1024; // 100 MB;
+     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize);
+     *
+     * try {
+     *     client.uploadFromFile(filePath, parallelTransferOptions, headers, metadata, AccessTier.HOT,
+     *         requestConditions, timeout);
+     *     System.out.println("Upload from file succeeded");
+     * } catch (UncheckedIOException ex) {
+     *     System.err.printf("Failed to upload from file %s%n", ex.getMessage());
+     * }
+     * 
+ * * * @param filePath Path of the file to upload * @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file. Number of parallel @@ -245,7 +286,32 @@ public void uploadFromFile(String filePath, ParallelTransferOptions parallelTran * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClient.uploadFromFileWithResponse#BlobUploadFromFileOptions-Duration-Context} + * + *
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = new HashMap<>(Collections.singletonMap("metadata", "value"));
+     * Map<String, String> tags = new HashMap<>(Collections.singletonMap("tag", "value"));
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * long blockSize = 100 * 1024 * 1024; // 100 MB;
+     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize);
+     *
+     * try {
+     *     client.uploadFromFileWithResponse(new BlobUploadFromFileOptions(filePath)
+     *         .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata)
+     *         .setTags(tags).setTier(AccessTier.HOT).setRequestConditions(requestConditions), timeout,
+     *         Context.NONE);
+     *     System.out.println("Upload from file succeeded");
+     * } catch (UncheckedIOException ex) {
+     *     System.err.printf("Failed to upload from file %s%n", ex.getMessage());
+     * }
+     * 
+ * * * @param options {@link BlobUploadFromFileOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. diff --git a/sdk/storage/azure-storage-blob-cryptography/src/main/java/com/azure/storage/blob/specialized/cryptography/EncryptedBlobClientBuilder.java b/sdk/storage/azure-storage-blob-cryptography/src/main/java/com/azure/storage/blob/specialized/cryptography/EncryptedBlobClientBuilder.java index b0c85973f27c4..9384f33b32694 100644 --- a/sdk/storage/azure-storage-blob-cryptography/src/main/java/com/azure/storage/blob/specialized/cryptography/EncryptedBlobClientBuilder.java +++ b/sdk/storage/azure-storage-blob-cryptography/src/main/java/com/azure/storage/blob/specialized/cryptography/EncryptedBlobClientBuilder.java @@ -139,7 +139,15 @@ public EncryptedBlobClientBuilder() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient} + * + *
+     * EncryptedBlobAsyncClient client = new EncryptedBlobClientBuilder()
+     *     .key(key, keyWrapAlgorithm)
+     *     .keyResolver(keyResolver)
+     *     .connectionString(connectionString)
+     *     .buildEncryptedBlobAsyncClient();
+     * 
+ * * * @return a {@link EncryptedBlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. @@ -154,7 +162,15 @@ public EncryptedBlobClient buildEncryptedBlobClient() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient} + * + *
+     * EncryptedBlobClient client = new EncryptedBlobClientBuilder()
+     *     .key(key, keyWrapAlgorithm)
+     *     .keyResolver(keyResolver)
+     *     .connectionString(connectionString)
+     *     .buildEncryptedBlobClient();
+     * 
+ * * * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. diff --git a/sdk/storage/azure-storage-blob-cryptography/src/samples/java/com/azure/storage/blob/specialized/cryptography/ReadmeSamples.java b/sdk/storage/azure-storage-blob-cryptography/src/samples/java/com/azure/storage/blob/specialized/cryptography/ReadmeSamples.java index 21a925b94634e..34e27d3f7eec6 100644 --- a/sdk/storage/azure-storage-blob-cryptography/src/samples/java/com/azure/storage/blob/specialized/cryptography/ReadmeSamples.java +++ b/sdk/storage/azure-storage-blob-cryptography/src/samples/java/com/azure/storage/blob/specialized/cryptography/ReadmeSamples.java @@ -41,14 +41,17 @@ public class ReadmeSamples { private String keyName; public void getEncryptedBlobClientBlobClient() { + // BEGIN: readme-sample-getEncryptedBlobClientBlobClient EncryptedBlobClient client = new EncryptedBlobClientBuilder() .key(key, keyWrapAlgorithm) .keyResolver(keyResolver) .blobClient(blobClient) .buildEncryptedBlobClient(); + // END: readme-sample-getEncryptedBlobClientBlobClient } public void getEncryptedBlobClient() { + // BEGIN: readme-sample-getEncryptedBlobClient EncryptedBlobClient client = new EncryptedBlobClientBuilder() .key(key, keyWrapAlgorithm) .keyResolver(keyResolver) @@ -56,9 +59,11 @@ public void getEncryptedBlobClient() { .containerName(containerName) .blobName(blobName) .buildEncryptedBlobClient(); + // END: readme-sample-getEncryptedBlobClient } public void getClientLocalKey() { + // BEGIN: readme-sample-getClientLocalKey JsonWebKey localKey = JsonWebKey.fromAes(new SecretKeySpec(keyBytes, secretKeyAlgorithm), Arrays.asList(KeyOperation.WRAP_KEY, KeyOperation.UNWRAP_KEY)) .setId("my-id"); @@ -71,9 +76,11 @@ public void getClientLocalKey() { .containerName(containerName) .blobName(blobName) .buildEncryptedBlobClient(); + // END: readme-sample-getClientLocalKey } public void getClientKeyVaultKey() { + // BEGIN: readme-sample-getClientKeyVaultKey KeyClient keyClient = new KeyClientBuilder() .vaultUrl(keyVaultUrl) .credential(tokenCredential) @@ -92,6 +99,7 @@ public void getClientKeyVaultKey() { .containerName(containerName) .blobName(blobName) .buildEncryptedBlobClient(); + // END: readme-sample-getClientKeyVaultKey } } diff --git a/sdk/storage/azure-storage-blob-nio/README.md b/sdk/storage/azure-storage-blob-nio/README.md index 0b31dbbd6d580..ffb95d920675d 100644 --- a/sdk/storage/azure-storage-blob-nio/README.md +++ b/sdk/storage/azure-storage-blob-nio/README.md @@ -194,7 +194,6 @@ to guarantee that the data is available to be read. 
try (OutputStream os = Files.newOutputStream(filePath)) { os.write(0); } -``` ### Copy a file diff --git a/sdk/storage/azure-storage-blob-nio/src/samples/java/com/azure/storage/blob/nio/ReadmeSamples.java b/sdk/storage/azure-storage-blob-nio/src/samples/java/com/azure/storage/blob/nio/ReadmeSamples.java index 8b84599807942..b3732ae9d585f 100644 --- a/sdk/storage/azure-storage-blob-nio/src/samples/java/com/azure/storage/blob/nio/ReadmeSamples.java +++ b/sdk/storage/azure-storage-blob-nio/src/samples/java/com/azure/storage/blob/nio/ReadmeSamples.java @@ -37,62 +37,84 @@ public ReadmeSamples() throws URISyntaxException, IOException { } public void createAFileSystem() throws URISyntaxException, IOException { + // BEGIN: readme-sample-createAFileSystem Map config = new HashMap<>(); String stores = ","; // A comma separated list of container names StorageSharedKeyCredential credential = new StorageSharedKeyCredential(" attributes = Files.readAttributes(filePath, "azureBlob:metadata,headers"); + // END: readme-sample-readAttributesOnAFileString } public void writeAttributesToAFile() throws IOException { + // BEGIN: readme-sample-writeAttributesToAFile AzureBlobFileAttributeView view = Files.getFileAttributeView(filePath, AzureBlobFileAttributeView.class); view.setMetadata(Collections.emptyMap()); + // END: readme-sample-writeAttributesToAFile } public void writeAttributesToAFileString() throws IOException { + // BEGIN: readme-sample-writeAttributesToAFileString Files.setAttribute(filePath, "azureBlob:blobHttpHeaders", new BlobHttpHeaders()); + // END: readme-sample-writeAttributesToAFileString } } diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobAsyncClient.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobAsyncClient.java index ffb1a610521bb..3b57291b54fc8 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobAsyncClient.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobAsyncClient.java @@ -310,7 +310,16 @@ private SpecializedBlobClientBuilder prepareBuilder() { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobAsyncClient.upload#Flux-ParallelTransferOptions}
+     * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.upload#Flux-ParallelTransferOptions -->
+     * <pre>
+     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
+     *     .setBlockSizeLong(blockSize)
+     *     .setMaxConcurrency(maxConcurrency);
+     * client.upload(data, parallelTransferOptions).subscribe(response ->
+     *     System.out.printf("Uploaded BlockBlob MD5 is %s%n",
+     *         Base64.getEncoder().encodeToString(response.getContentMd5())));
+     * 
+ * * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected @@ -351,7 +360,17 @@ public Mono upload(Flux data, ParallelTransferOptions * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobAsyncClient.upload#Flux-ParallelTransferOptions-boolean}
+     * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.upload#Flux-ParallelTransferOptions-boolean -->
+     * <pre>
+     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
+     *     .setBlockSizeLong(blockSize)
+     *     .setMaxConcurrency(maxConcurrency);
+     * boolean overwrite = false; // Default behavior
+     * client.upload(data, parallelTransferOptions, overwrite).subscribe(response ->
+     *     System.out.printf("Uploaded BlockBlob MD5 is %s%n",
+     *         Base64.getEncoder().encodeToString(response.getContentMd5())));
+     * 
+ * * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected @@ -391,7 +410,13 @@ public Mono upload(Flux data, ParallelTransferOptions * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobAsyncClient.upload#BinaryData}
+     * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.upload#BinaryData -->
+     * <pre>
+     * client.upload(BinaryData.fromString("Data!")).subscribe(response ->
+     *     System.out.printf("Uploaded BlockBlob MD5 is %s%n",
+     *         Base64.getEncoder().encodeToString(response.getContentMd5())));
+     * 
+ * * * @param data The data to write to the blob. * @return A reactive response containing the information of the uploaded block blob. @@ -410,7 +435,14 @@ public Mono upload(BinaryData data) { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobAsyncClient.upload#BinaryData-boolean}
+     * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.upload#BinaryData-boolean -->
+     * <pre>
+     * boolean overwrite = false; // Default behavior
+     * client.upload(BinaryData.fromString("Data!"), overwrite).subscribe(response ->
+     *     System.out.printf("Uploaded BlockBlob MD5 is %s%n",
+     *         Base64.getEncoder().encodeToString(response.getContentMd5())));
+     * 
+ * * * @param data The data to write to the blob. * @param overwrite Whether or not to overwrite, should the blob already exist. @@ -467,11 +499,52 @@ public Mono upload(BinaryData data, * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobAsyncClient.uploadWithResponse#Flux-ParallelTransferOptions-BlobHttpHeaders-Map-AccessTier-BlobRequestConditions}
+     * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadWithResponse#Flux-ParallelTransferOptions-BlobHttpHeaders-Map-AccessTier-BlobRequestConditions -->
+     * <pre>
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
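+     *     // "data".getBytes(...) below is sample placeholder data, not a real MD5 digest of the blob content.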
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
+     *     .setBlockSizeLong(blockSize)
+     *     .setMaxConcurrency(maxConcurrency);
+     *
+     * client.uploadWithResponse(data, parallelTransferOptions, headers, metadata, AccessTier.HOT, requestConditions)
+     *     .subscribe(response -> System.out.printf("Uploaded BlockBlob MD5 is %s%n",
+     *         Base64.getEncoder().encodeToString(response.getValue().getContentMd5())));
+     * 
+ * * *

      * <p><strong>Using Progress Reporting</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobAsyncClient.uploadWithResponse#Flux-ParallelTransferOptions-BlobHttpHeaders-Map-AccessTier-BlobRequestConditions.ProgressReporter}
+     * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadWithResponse#Flux-ParallelTransferOptions-BlobHttpHeaders-Map-AccessTier-BlobRequestConditions.ProgressReporter -->
+     * <pre>
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions()
+     *     .setBlockSizeLong(blockSize)
+     *     .setMaxConcurrency(maxConcurrency)
+     *     .setProgressReceiver(bytesTransferred -> System.out.printf("Upload progress: %s bytes sent", bytesTransferred));
+     *
+     * client.uploadWithResponse(data, parallelTransferOptions, headers, metadata, AccessTier.HOT, requestConditions)
+     *     .subscribe(response -> System.out.printf("Uploaded BlockBlob MD5 is %s%n",
+     *         Base64.getEncoder().encodeToString(response.getValue().getContentMd5())));
+     * 
+ * * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected @@ -519,11 +592,57 @@ public Mono> uploadWithResponse(Flux data, * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobAsyncClient.uploadWithResponse#BlobParallelUploadOptions}
+     * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadWithResponse#BlobParallelUploadOptions -->
+     * <pre>
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * Map<String, String> tags = Collections.singletonMap("tag", "value");
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
+     *     .setMaxConcurrency(maxConcurrency).setProgressReceiver(bytesTransferred ->
+     *         System.out.printf("Upload progress: %s bytes sent", bytesTransferred));
+     *
+     * client.uploadWithResponse(new BlobParallelUploadOptions(data)
+     *     .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata).setTags(tags)
+     *     .setTier(AccessTier.HOT).setRequestConditions(requestConditions))
+     *     .subscribe(response -> System.out.printf("Uploaded BlockBlob MD5 is %s%n",
+     *         Base64.getEncoder().encodeToString(response.getValue().getContentMd5())));
+     * 
+ * * *

      * <p><strong>Using Progress Reporting</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobAsyncClient.uploadWithResponse#BlobParallelUploadOptions}
+     * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadWithResponse#BlobParallelUploadOptions -->
+     * <pre>
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * Map<String, String> tags = Collections.singletonMap("tag", "value");
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize)
+     *     .setMaxConcurrency(maxConcurrency).setProgressReceiver(bytesTransferred ->
+     *         System.out.printf("Upload progress: %s bytes sent", bytesTransferred));
+     *
+     * client.uploadWithResponse(new BlobParallelUploadOptions(data)
+     *     .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata).setTags(tags)
+     *     .setTier(AccessTier.HOT).setRequestConditions(requestConditions))
+     *     .subscribe(response -> System.out.printf("Uploaded BlockBlob MD5 is %s%n",
+     *         Base64.getEncoder().encodeToString(response.getValue().getContentMd5())));
+     * 
+ * * * @param options {@link BlobParallelUploadOptions}. Unlike other upload methods, this method does not require that * the {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not @@ -689,7 +808,13 @@ private Mono> uploadInChunks(BlockBlobAsyncClient blockB * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobAsyncClient.uploadFromFile#String}
+     * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadFromFile#String -->
+     * <pre>
+     * client.uploadFromFile(filePath)
+     *     .doOnError(throwable -> System.err.printf("Failed to upload from file %s%n", throwable.getMessage()))
+     *     .subscribe(completion -> System.out.println("Upload from file succeeded"));
+     * 
+ * * * @param filePath Path to the upload file * @return An empty response @@ -710,7 +835,14 @@ public Mono uploadFromFile(String filePath) { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobAsyncClient.uploadFromFile#String-boolean}
+     * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadFromFile#String-boolean -->
+     * <pre>
+     * boolean overwrite = false; // Default behavior
+     * client.uploadFromFile(filePath, overwrite)
+     *     .doOnError(throwable -> System.err.printf("Failed to upload from file %s%n", throwable.getMessage()))
+     *     .subscribe(completion -> System.out.println("Upload from file succeeded"));
+     * 
+ * * * @param filePath Path to the upload file * @param overwrite Whether or not to overwrite, should the blob already exist. @@ -749,7 +881,25 @@ public Mono uploadFromFile(String filePath, boolean overwrite) { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobAsyncClient.uploadFromFile#String-ParallelTransferOptions-BlobHttpHeaders-Map-AccessTier-BlobRequestConditions}
+     * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadFromFile#String-ParallelTransferOptions-BlobHttpHeaders-Map-AccessTier-BlobRequestConditions -->
+     * <pre>
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.uploadFromFile(filePath,
+     *     new ParallelTransferOptions().setBlockSizeLong(BlockBlobClient.MAX_STAGE_BLOCK_BYTES_LONG),
+     *     headers, metadata, AccessTier.HOT, requestConditions)
+     *     .doOnError(throwable -> System.err.printf("Failed to upload from file %s%n", throwable.getMessage()))
+     *     .subscribe(completion -> System.out.println("Upload from file succeeded"));
+     * 
+ * * * @param filePath Path to the upload file * @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file. Number of parallel @@ -780,7 +930,28 @@ public Mono uploadFromFile(String filePath, ParallelTransferOptions parall * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobAsyncClient.uploadFromFileWithResponse#BlobUploadFromFileOptions}
+     * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadFromFileWithResponse#BlobUploadFromFileOptions -->
+     * <pre>
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * Map<String, String> tags = Collections.singletonMap("tag", "value");
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.uploadFromFileWithResponse(new BlobUploadFromFileOptions(filePath)
+     *     .setParallelTransferOptions(
+     *         new ParallelTransferOptions().setBlockSizeLong(BlobAsyncClient.BLOB_MAX_UPLOAD_BLOCK_SIZE))
+     *     .setHeaders(headers).setMetadata(metadata).setTags(tags).setTier(AccessTier.HOT)
+     *     .setRequestConditions(requestConditions))
+     *     .doOnError(throwable -> System.err.printf("Failed to upload from file %s%n", throwable.getMessage()))
+     *     .subscribe(completion -> System.out.println("Upload from file succeeded"));
+     * 
+ * * * @param options {@link BlobUploadFromFileOptions} * @return A reactive response containing the information of the uploaded block blob. diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobClient.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobClient.java index 7a2557fc15429..b2f93561ece2b 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobClient.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobClient.java @@ -311,7 +311,16 @@ public Response uploadWithResponse(BlobParallelUploadOptions opti * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobClient.uploadFromFile#String}
+     * <!-- src_embed com.azure.storage.blob.BlobClient.uploadFromFile#String -->
+     * <pre>
+     * try {
+     *     client.uploadFromFile(filePath);
+     *     System.out.println("Upload from file succeeded");
+     * } catch (UncheckedIOException ex) {
+     *     System.err.printf("Failed to upload from file %s%n", ex.getMessage());
+     * }
+     * 
+ * * * @param filePath Path of the file to upload * @throws UncheckedIOException If an I/O error occurs @@ -326,7 +335,17 @@ public void uploadFromFile(String filePath) { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobClient.uploadFromFile#String-boolean}
+     * <!-- src_embed com.azure.storage.blob.BlobClient.uploadFromFile#String-boolean -->
+     * <pre>
+     * try {
+     *     boolean overwrite = false;
+     *     client.uploadFromFile(filePath, overwrite);
+     *     System.out.println("Upload from file succeeded");
+     * } catch (UncheckedIOException ex) {
+     *     System.err.printf("Failed to upload from file %s%n", ex.getMessage());
+     * }
+     * 
+ * * * @param filePath Path of the file to upload * @param overwrite Whether or not to overwrite, should the blob already exist @@ -354,7 +373,29 @@ public void uploadFromFile(String filePath, boolean overwrite) { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobClient.uploadFromFile#String-ParallelTransferOptions-BlobHttpHeaders-Map-AccessTier-BlobRequestConditions-Duration}
+     * <!-- src_embed com.azure.storage.blob.BlobClient.uploadFromFile#String-ParallelTransferOptions-BlobHttpHeaders-Map-AccessTier-BlobRequestConditions-Duration -->
+     * <pre>
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Long blockSize = 100L * 1024L * 1024L; // 100 MB;
+     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize);
+     *
+     * try {
+     *     client.uploadFromFile(filePath, parallelTransferOptions, headers, metadata,
+     *         AccessTier.HOT, requestConditions, timeout);
+     *     System.out.println("Upload from file succeeded");
+     * } catch (UncheckedIOException ex) {
+     *     System.err.printf("Failed to upload from file %s%n", ex.getMessage());
+     * }
+     * 
+ * * * @param filePath Path of the file to upload * @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file. Number of parallel @@ -383,7 +424,32 @@ public void uploadFromFile(String filePath, ParallelTransferOptions parallelTran * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobClient.uploadFromFileWithResponse#BlobUploadFromFileOptions-Duration-Context}
+     * <!-- src_embed com.azure.storage.blob.BlobClient.uploadFromFileWithResponse#BlobUploadFromFileOptions-Duration-Context -->
+     * <pre>
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * Map<String, String> tags = Collections.singletonMap("tag", "value");
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Long blockSize = 100 * 1024 * 1024L; // 100 MB;
+     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize);
+     *
+     * try {
+     *     client.uploadFromFileWithResponse(new BlobUploadFromFileOptions(filePath)
+     *         .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata)
+     *         .setTags(tags).setTier(AccessTier.HOT).setRequestConditions(requestConditions), timeout,
+     *         new Context(key2, value2));
+     *     System.out.println("Upload from file succeeded");
+     * } catch (UncheckedIOException ex) {
+     *     System.err.printf("Failed to upload from file %s%n", ex.getMessage());
+     * }
+     * 
+ * * * @param options {@link BlobUploadFromFileOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobClientBuilder.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobClientBuilder.java index 080e821a0afe6..21d35eaa40af0 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobClientBuilder.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobClientBuilder.java @@ -92,7 +92,13 @@ public BlobClientBuilder() { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobClientBuilder.buildClient}
+     * <!-- src_embed com.azure.storage.blob.BlobClientBuilder.buildClient -->
+     * <pre>
+     * BlobClient client = new BlobClientBuilder()
+     *     .connectionString(connectionString)
+     *     .buildClient();
+     * 
+ * * * @return a {@link BlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint} or {@code blobName} is {@code null}. @@ -109,7 +115,13 @@ public BlobClient buildClient() { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobClientBuilder.buildAsyncClient}
+     * <!-- src_embed com.azure.storage.blob.BlobClientBuilder.buildAsyncClient -->
+     * <pre>
+     * BlobAsyncClient client = new BlobClientBuilder()
+     *     .connectionString(connectionString)
+     *     .buildAsyncClient();
+     * 
+ * * * @return a {@link BlobAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint} or {@code blobName} is {@code null}. diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobContainerAsyncClient.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobContainerAsyncClient.java index 99e495de8aea8..c1308265e74e6 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobContainerAsyncClient.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobContainerAsyncClient.java @@ -144,7 +144,11 @@ public final class BlobContainerAsyncClient { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.getBlobAsyncClient#String}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.getBlobAsyncClient#String -->
+     * <pre>
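+     * // The returned BlobAsyncClient reuses this container client's HTTP pipeline and configuration.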
+     * BlobAsyncClient blobAsyncClient = client.getBlobAsyncClient(blobName);
+     * 
+ * * * @param blobName A {@code String} representing the name of the blob. If the blob name contains special characters, * pass in the url encoded version of the blob name. @@ -160,7 +164,11 @@ public BlobAsyncClient getBlobAsyncClient(String blobName) { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.getBlobAsyncClient#String-String}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.getBlobAsyncClient#String-String -->
+     * <pre>
+     * BlobAsyncClient blobAsyncClient = client.getBlobAsyncClient(blobName, snapshot);
+     * 
+ * * * @param blobName A {@code String} representing the name of the blob. If the blob name contains special characters, * pass in the url encoded version of the blob name. @@ -209,7 +217,12 @@ public String getBlobContainerUrl() { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.getBlobContainerName}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.getBlobContainerName -->
+     * <pre>
+     * String containerName = client.getBlobContainerName();
+     * System.out.println("The name of the blob is " + containerName);
+     * 
+ * * * @return The name of container. */ @@ -292,7 +305,11 @@ public String getEncryptionScope() { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.exists}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.exists -->
+     * <pre>
+     * client.exists().subscribe(response -> System.out.printf("Exists? %b%n", response));
+     * 
+ * * * @return true if the container exists, false if it doesn't */ @@ -310,7 +327,11 @@ public Mono exists() { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.existsWithResponse}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.existsWithResponse -->
+     * <pre>
+     * client.existsWithResponse().subscribe(response -> System.out.printf("Exists? %b%n", response.getValue()));
+     * 
+ * * * @return true if the container exists, false if it doesn't */ @@ -346,7 +367,13 @@ Mono> existsWithResponse(Context context) { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.create}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.create -->
+     * <pre>
+     * client.create().subscribe(
+     *     response -> System.out.printf("Create completed%n"),
+     *     error -> System.out.printf("Error while creating container %s%n", error));
+     * 
+ * * * @return A reactive response signalling completion. */ @@ -366,7 +393,13 @@ public Mono create() { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.createWithResponse#Map-PublicAccessType}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.createWithResponse#Map-PublicAccessType -->
+     * <pre>
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
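+     * // PublicAccessType.CONTAINER allows anonymous read access to the container and its blobs.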
+     * client.createWithResponse(metadata, PublicAccessType.CONTAINER).subscribe(response ->
+     *     System.out.printf("Create completed with status %d%n", response.getStatusCode()));
+     * 
+ * * * @param metadata Metadata to associate with the container. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. @@ -399,7 +432,13 @@ Mono> createWithResponse(Map metadata, PublicAcce * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.delete}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.delete -->
+     * <pre>
+     * client.delete().subscribe(
+     *     response -> System.out.printf("Delete completed%n"),
+     *     error -> System.out.printf("Delete failed: %s%n", error));
+     * 
+ * * * @return A reactive response signalling completion. */ @@ -419,7 +458,16 @@ public Mono delete() { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.deleteWithResponse#BlobRequestConditions}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.deleteWithResponse#BlobRequestConditions -->
+     * <pre>
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.deleteWithResponse(requestConditions).subscribe(response ->
+     *     System.out.printf("Delete completed with status %d%n", response.getStatusCode()));
+     * 
+ * * * @param requestConditions {@link BlobRequestConditions} * @return A reactive response signalling completion. @@ -459,7 +507,15 @@ Mono> deleteWithResponse(BlobRequestConditions requestConditions, * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.getProperties}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.getProperties -->
+     * <pre>
+     * client.getProperties().subscribe(response ->
+     *     System.out.printf("Public Access Type: %s, Legal Hold? %b, Immutable? %b%n",
+     *         response.getBlobPublicAccess(),
+     *         response.hasLegalHold(),
+     *         response.hasImmutabilityPolicy()));
+     * 
+ * * * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} containing the * container properties. @@ -479,7 +535,15 @@ public Mono getProperties() { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.getPropertiesWithResponse#String}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.getPropertiesWithResponse#String -->
+     * <pre>
+     * client.getPropertiesWithResponse(leaseId).subscribe(response ->
+     *     System.out.printf("Public Access Type: %s, Legal Hold? %b, Immutable? %b%n",
+     *         response.getValue().getBlobPublicAccess(),
+     *         response.getValue().hasLegalHold(),
+     *         response.getValue().hasImmutabilityPolicy()));
+     * 
+ * * * @param leaseId The lease ID the active lease on the container must match. * @return A reactive response containing the container properties. @@ -516,7 +580,14 @@ Mono> getPropertiesWithResponse(String leaseId * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.setMetadata#Map}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.setMetadata#Map -->
+     * <pre>
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * client.setMetadata(metadata).subscribe(
+     *     response -> System.out.printf("Set metadata completed%n"),
+     *     error -> System.out.printf("Set metadata failed: %s%n", error));
+     * 
+ * * * @param metadata Metadata to associate with the container. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. @@ -538,7 +609,17 @@ public Mono setMetadata(Map metadata) { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.setMetadataWithResponse#Map-BlobRequestConditions}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.setMetadataWithResponse#Map-BlobRequestConditions -->
+     * <pre>
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.setMetadataWithResponse(metadata, requestConditions).subscribe(response ->
+     *     System.out.printf("Set metadata completed with status %d%n", response.getStatusCode()));
+     * 
+ * * * @param metadata Metadata to associate with the container. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. @@ -582,7 +663,19 @@ Mono> setMetadataWithResponse(Map metadata, * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.getAccessPolicy}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.getAccessPolicy -->
+     * <pre>
+     * client.getAccessPolicy().subscribe(response -> {
+     *     System.out.printf("Blob Access Type: %s%n", response.getBlobAccessType());
+     *
+     *     for (BlobSignedIdentifier identifier : response.getIdentifiers()) {
+     *         System.out.printf("Identifier Name: %s, Permissions %s%n",
+     *             identifier.getId(),
+     *             identifier.getAccessPolicy().getPermissions());
+     *     }
+     * });
+     * 
+ * * * @return A reactive response containing the container access policy. */ @@ -602,7 +695,19 @@ public Mono getAccessPolicy() { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.getAccessPolicyWithResponse#String}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.getAccessPolicyWithResponse#String -->
+     * <pre>
+     * client.getAccessPolicyWithResponse(leaseId).subscribe(response -> {
+     *     System.out.printf("Blob Access Type: %s%n", response.getValue().getBlobAccessType());
+     *
+     *     for (BlobSignedIdentifier identifier : response.getValue().getIdentifiers()) {
+     *         System.out.printf("Identifier Name: %s, Permissions %s%n",
+     *             identifier.getId(),
+     *             identifier.getAccessPolicy().getPermissions());
+     *     }
+     * });
+     * 
+ * * * @param leaseId The lease ID the active lease on the container must match. * @return A reactive response containing the container access policy. @@ -634,7 +739,20 @@ Mono> getAccessPolicyWithResponse(String l * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.setAccessPolicy#PublicAccessType-List}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.setAccessPolicy#PublicAccessType-List -->
+     * <pre>
+     * BlobSignedIdentifier identifier = new BlobSignedIdentifier()
+     *     .setId("name")
+     *     .setAccessPolicy(new BlobAccessPolicy()
+     *         .setStartsOn(OffsetDateTime.now())
+     *         .setExpiresOn(OffsetDateTime.now().plusDays(7))
+     *         .setPermissions("permissionString"));
+     *
+     * client.setAccessPolicy(PublicAccessType.CONTAINER, Collections.singletonList(identifier)).subscribe(
+     *     response -> System.out.printf("Set access policy completed%n"),
+     *     error -> System.out.printf("Set access policy failed: %s%n", error));
+     * 
+ * * * @param accessType Specifies how the data in this container is available to the public. See the * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. @@ -661,7 +779,24 @@ public Mono setAccessPolicy(PublicAccessType accessType, ListCode Samples

      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.setAccessPolicyWithResponse#PublicAccessType-List-BlobRequestConditions}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.setAccessPolicyWithResponse#PublicAccessType-List-BlobRequestConditions -->
+     * <pre>
+     * BlobSignedIdentifier identifier = new BlobSignedIdentifier()
+     *     .setId("name")
+     *     .setAccessPolicy(new BlobAccessPolicy()
+     *         .setStartsOn(OffsetDateTime.now())
+     *         .setExpiresOn(OffsetDateTime.now().plusDays(7))
+     *         .setPermissions("permissionString"));
+     *
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.setAccessPolicyWithResponse(PublicAccessType.CONTAINER, Collections.singletonList(identifier), requestConditions)
+     *     .subscribe(response ->
+     *         System.out.printf("Set access policy completed with status %d%n", response.getStatusCode()));
+     * 
+ * * * @param accessType Specifies how the data in this container is available to the public. See the * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. @@ -744,7 +879,12 @@ OffsetDateTime.now will only give back milliseconds (more precise fields are zer * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.listBlobs}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.listBlobs -->
+     * <pre>
+     * client.listBlobs().subscribe(blob ->
+     *     System.out.printf("Name: %s, Directory? %b%n", blob.getName(), blob.isPrefix()));
+     * 
+ * * * @return A reactive response emitting the flattened blobs. */ @@ -777,7 +917,22 @@ public PagedFlux listBlobs() { * * * - * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.listBlobs#ListBlobsOptions} + * + *
+     * ListBlobsOptions options = new ListBlobsOptions()
+     *     .setPrefix("prefixToMatch")
+     *     .setDetails(new BlobListDetails()
+     *         .setRetrieveDeletedBlobs(true)
+     *         .setRetrieveSnapshots(true));
+     *
+     * client.listBlobs(options).subscribe(blob ->
+     *     System.out.printf("Name: %s, Directory? %b, Deleted? %b, Snapshot ID: %s%n",
+     *         blob.getName(),
+     *         blob.isPrefix(),
+     *         blob.isDeleted(),
+     *         blob.getSnapshot()));
+     * 
+ * * * @param options {@link ListBlobsOptions} * @return A reactive response emitting the listed blobs, flattened. @@ -811,7 +966,24 @@ public PagedFlux listBlobs(ListBlobsOptions options) { * * * - * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.listBlobs#ListBlobsOptions-String} + * + *
+     * ListBlobsOptions options = new ListBlobsOptions()
+     *     .setPrefix("prefixToMatch")
+     *     .setDetails(new BlobListDetails()
+     *         .setRetrieveDeletedBlobs(true)
+     *         .setRetrieveSnapshots(true));
+     *
+     * String continuationToken = "continuationToken";
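+     * // In practice the continuation token comes from a previous page of list results.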
+     *
+     * client.listBlobs(options, continuationToken).subscribe(blob ->
+     *     System.out.printf("Name: %s, Directory? %b, Deleted? %b, Snapshot ID: %s%n",
+     *         blob.getName(),
+     *         blob.isPrefix(),
+     *         blob.isDeleted(),
+     *         blob.getSnapshot()));
+     * 
+ * * * @param options {@link ListBlobsOptions} * @param continuationToken Identifies the portion of the list to be returned with the next list operation. @@ -928,7 +1100,12 @@ private Mono listBlobsFlatSegment(String * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.listBlobsByHierarchy#String}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.listBlobsByHierarchy#String -->
+     * <pre>
+     * client.listBlobsByHierarchy("directoryName").subscribe(blob ->
+     *     System.out.printf("Name: %s, Directory? %b%n", blob.getName(), blob.isPrefix()));
+     * 
+ * * * @param directory The directory to list blobs underneath * @return A reactive response emitting the prefixes and blobs. @@ -968,7 +1145,22 @@ public PagedFlux listBlobsByHierarchy(String directory) { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.listBlobsByHierarchy#String-ListBlobsOptions}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.listBlobsByHierarchy#String-ListBlobsOptions -->
+     * <pre>
+     * ListBlobsOptions options = new ListBlobsOptions()
+     *     .setPrefix("directoryName")
+     *     .setDetails(new BlobListDetails()
+     *         .setRetrieveDeletedBlobs(true)
+     *         .setRetrieveSnapshots(true));
+     *
+     * client.listBlobsByHierarchy("/", options).subscribe(blob ->
+     *     System.out.printf("Name: %s, Directory? %b, Deleted? %b, Snapshot ID: %s%n",
+     *         blob.getName(),
+     *         blob.isPrefix(),
+     *         blob.isDeleted(),
+     *         blob.getSnapshot()));
+     * 
+ * * * @param delimiter The delimiter for blob hierarchy, "/" for hierarchy based on directories * @param options {@link ListBlobsOptions} @@ -1061,7 +1253,14 @@ private Mono listBlobsHierarchySegme * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.getAccountInfo}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.getAccountInfo -->
+     * <pre>
+     * client.getAccountInfo().subscribe(response ->
+     *     System.out.printf("Account Kind: %s, SKU: %s%n",
+     *         response.getAccountKind(),
+     *         response.getSkuName()));
+     * 
+ * * * @return A reactive response containing the account info. */ @@ -1080,7 +1279,14 @@ public Mono getAccountInfo() { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.getAccountInfoWithResponse}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.getAccountInfoWithResponse -->
+     * <pre>
+     * client.getAccountInfoWithResponse().subscribe(response ->
+     *     System.out.printf("Account Kind: %s, SKU: %s%n",
+     *         response.getValue().getAccountKind(),
+     *         response.getValue().getSkuName()));
+     * 
+ * * * @return A reactive response containing the account info. */ @@ -1108,7 +1314,8 @@ Mono> getAccountInfoWithResponse(Context context) { // * // *

Code Samples

// * -// * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.rename#String} +// * +// * // * // * @param destinationContainerName The new name of the container. // * @return A {@link Mono} containing a {@link BlobContainerAsyncClient} used to interact with the renamed container. @@ -1123,7 +1330,8 @@ Mono> getAccountInfoWithResponse(Context context) { // * // *

Code Samples

// * -// * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.renameWithResponse#BlobContainerRenameOptions} +// * +// * // * // * @param options {@link BlobContainerRenameOptions} // * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} contains a @@ -1171,7 +1379,17 @@ Mono> getAccountInfoWithResponse(Context context) { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.generateUserDelegationSas#BlobServiceSasSignatureValues-UserDelegationKey}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.generateUserDelegationSas#BlobServiceSasSignatureValues-UserDelegationKey -->
+     * <pre>
+     * OffsetDateTime myExpiryTime = OffsetDateTime.now().plusDays(1);
+     * BlobContainerSasPermission myPermission = new BlobContainerSasPermission().setReadPermission(true);
+     *
+     * BlobServiceSasSignatureValues myValues = new BlobServiceSasSignatureValues(myExpiryTime, myPermission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
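+     * // The user delegation key is typically obtained from BlobServiceAsyncClient.getUserDelegationKey.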
+     * client.generateUserDelegationSas(myValues, userDelegationKey);
+     * 
+ * * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. @@ -1192,7 +1410,17 @@ public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServic * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.generateUserDelegationSas#BlobServiceSasSignatureValues-UserDelegationKey-String-Context}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.generateUserDelegationSas#BlobServiceSasSignatureValues-UserDelegationKey-String-Context -->
+     * <pre>
+     * OffsetDateTime myExpiryTime = OffsetDateTime.now().plusDays(1);
+     * BlobContainerSasPermission myPermission = new BlobContainerSasPermission().setReadPermission(true);
+     *
+     * BlobServiceSasSignatureValues myValues = new BlobServiceSasSignatureValues(myExpiryTime, myPermission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateUserDelegationSas(myValues, userDelegationKey, accountName, new Context("key", "value"));
+     * 
+ * * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. @@ -1216,7 +1444,17 @@ public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServic * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.generateSas#BlobServiceSasSignatureValues}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.generateSas#BlobServiceSasSignatureValues -->
+     * <pre>
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * BlobContainerSasPermission permission = new BlobContainerSasPermission().setReadPermission(true);
+     *
+     * BlobServiceSasSignatureValues values = new BlobServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateSas(values); // Client must be authenticated via StorageSharedKeyCredential
+     * 
+ * * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * @@ -1233,7 +1471,18 @@ public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureV * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerAsyncClient.generateSas#BlobServiceSasSignatureValues-Context}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerAsyncClient.generateSas#BlobServiceSasSignatureValues-Context -->
+     * <pre>
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * BlobContainerSasPermission permission = new BlobContainerSasPermission().setReadPermission(true);
+     *
+     * BlobServiceSasSignatureValues values = new BlobServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * client.generateSas(values, new Context("key", "value"));
+     * 
+ * * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobContainerClient.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobContainerClient.java index 2c6bf6ab69297..7a313b20c6759 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobContainerClient.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobContainerClient.java @@ -74,7 +74,11 @@ public final class BlobContainerClient { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerClient.getBlobClient#String}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerClient.getBlobClient#String -->
+     * <pre>
+     * BlobClient blobClient = client.getBlobClient(blobName);
+     * 
+ * * @return A new {@link BlobClient} object which references the blob with the specified name in this container. */ public BlobClient getBlobClient(String blobName) { @@ -87,7 +91,11 @@ public BlobClient getBlobClient(String blobName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobContainerClient.getBlobClient#String-String} + * + *
+     * BlobClient blobClient = client.getBlobClient(blobName, snapshot);
+     * 
+ * * * @param blobName A {@code String} representing the name of the blob. If the blob name contains special characters, * pass in the url encoded version of the blob name. @@ -116,7 +124,12 @@ public BlobClient getBlobVersionClient(String blobName, String versionId) { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerClient.getBlobContainerName}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerClient.getBlobContainerName -->
+     * <pre>
+     * String containerName = client.getBlobContainerName();
+     * System.out.println("The name of the blob is " + containerName);
+     * 
+ * * * @return The name of container. */ @@ -202,7 +215,11 @@ public String getEncryptionScope() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobContainerClient.exists} + * + *
+     * System.out.printf("Exists? %b%n", client.exists());
+     * 
+ * * * @return true if the container exists, false if it doesn't */ @@ -215,7 +232,12 @@ public boolean exists() { * Gets if the container this client represents exists in the cloud. *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerClient.existsWithResponse#Duration-Context}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerClient.existsWithResponse#Duration-Context -->
+     * <pre>
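+     * // A Context lets you pass additional data through the HTTP pipeline for this call.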
+     * Context context = new Context("Key", "Value");
+     * System.out.printf("Exists? %b%n", client.existsWithResponse(timeout, context).getValue());
+     * 
+ * * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. @@ -235,7 +257,18 @@ public Response existsWithResponse(Duration timeout, Context context) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobContainerClient.create} + * + *
+     * try {
+     *     client.create();
+     *     System.out.printf("Create completed%n");
+     * } catch (BlobStorageException error) {
+     *     if (error.getErrorCode().equals(BlobErrorCode.CONTAINER_ALREADY_EXISTS)) {
+     *         System.out.printf("Can't create container. It already exists %n");
+     *     }
+     * }
+     * 
+ * */ @ServiceMethod(returns = ReturnType.SINGLE) public void create() { @@ -249,7 +282,15 @@ public void create() { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerClient.createWithResponse#Map-PublicAccessType-Duration-Context}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerClient.createWithResponse#Map-PublicAccessType-Duration-Context -->
+     * <pre>
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * Context context = new Context("Key", "Value");
+     *
+     * System.out.printf("Create completed with status %d%n",
+     *     client.createWithResponse(metadata, PublicAccessType.CONTAINER, timeout, context).getStatusCode());
+     * 
+ * * * @param metadata Metadata to associate with the container. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. @@ -273,7 +314,18 @@ public Response createWithResponse(Map metadata, PublicAcc * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobContainerClient.delete} + * + *
+     * try {
+     *     client.delete();
+     *     System.out.printf("Delete completed%n");
+     * } catch (BlobStorageException error) {
+     *     if (error.getErrorCode().equals(BlobErrorCode.CONTAINER_NOT_FOUND)) {
+     *         System.out.printf("Delete failed. Container was not found %n");
+     *     }
+     * }
+     * 
+ * */ @ServiceMethod(returns = ReturnType.SINGLE) public void delete() { @@ -287,7 +339,17 @@ public void delete() { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerClient.deleteWithResponse#BlobRequestConditions-Duration-Context}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerClient.deleteWithResponse#BlobRequestConditions-Duration-Context -->
+     * <pre>
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Context context = new Context("Key", "Value");
+     *
+     * System.out.printf("Delete completed with status %d%n", client.deleteWithResponse(
+     *     requestConditions, timeout, context).getStatusCode());
+     * 
+ * * * @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -308,7 +370,15 @@ public Response deleteWithResponse(BlobRequestConditions requestConditions * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobContainerClient.getProperties} + * + *
+     * BlobContainerProperties properties = client.getProperties();
+     * System.out.printf("Public Access Type: %s, Legal Hold? %b, Immutable? %b%n",
+     *     properties.getBlobPublicAccess(),
+     *     properties.hasLegalHold(),
+     *     properties.hasImmutabilityPolicy());
+     * 
+ * * * @return The container properties. */ @@ -323,7 +393,18 @@ public BlobContainerProperties getProperties() { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerClient.getPropertiesWithResponse#String-Duration-Context}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerClient.getPropertiesWithResponse#String-Duration-Context -->
+     * <pre>
+     * Context context = new Context("Key", "Value");
+     *
+     * BlobContainerProperties properties = client.getPropertiesWithResponse(leaseId, timeout, context)
+     *     .getValue();
+     * System.out.printf("Public Access Type: %s, Legal Hold? %b, Immutable? %b%n",
+     *     properties.getBlobPublicAccess(),
+     *     properties.hasLegalHold(),
+     *     properties.hasImmutabilityPolicy());
+     * 
+ * * * @param leaseId The lease ID the active lease on the container must match. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -342,7 +423,17 @@ public Response getPropertiesWithResponse(String leaseI * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobContainerClient.setMetadata#Map} + * + *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * try {
+     *     client.setMetadata(metadata);
+     *     System.out.printf("Set metadata completed with status %n");
+     * } catch (UnsupportedOperationException error) {
+     *     System.out.printf("Fail while setting metadata %n");
+     * }
+     * 
+ * * * @param metadata Metadata to associate with the container. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. @@ -358,7 +449,18 @@ public void setMetadata(Map metadata) { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerClient.setMetadataWithResponse#Map-BlobRequestConditions-Duration-Context}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerClient.setMetadataWithResponse#Map-BlobRequestConditions-Duration-Context -->
+     * <pre>
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Context context = new Context("Key", "Value");
+     *
+     * System.out.printf("Set metadata completed with status %d%n",
+     *     client.setMetadataWithResponse(metadata, requestConditions, timeout, context).getStatusCode());
+     * 
+ * * @param metadata Metadata to associate with the container. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. * @param requestConditions {@link BlobRequestConditions} @@ -381,7 +483,18 @@ public Response setMetadataWithResponse(Map metadata, * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobContainerClient.getAccessPolicy} + * + *
+     * BlobContainerAccessPolicies accessPolicies = client.getAccessPolicy();
+     * System.out.printf("Blob Access Type: %s%n", accessPolicies.getBlobAccessType());
+     *
+     * for (BlobSignedIdentifier identifier : accessPolicies.getIdentifiers()) {
+     *     System.out.printf("Identifier Name: %s, Permissions %s%n",
+     *         identifier.getId(),
+     *         identifier.getAccessPolicy().getPermissions());
+     * }
+     * 
+ * * * @return The container access policy. */ @@ -397,7 +510,20 @@ public BlobContainerAccessPolicies getAccessPolicy() { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerClient.getAccessPolicyWithResponse#String-Duration-Context}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerClient.getAccessPolicyWithResponse#String-Duration-Context -->
+     * <pre>
+     * Context context = new Context("Key", "Value");
+     * BlobContainerAccessPolicies accessPolicies = client.getAccessPolicyWithResponse(leaseId, timeout, context)
+     *     .getValue();
+     * System.out.printf("Blob Access Type: %s%n", accessPolicies.getBlobAccessType());
+     *
+     * for (BlobSignedIdentifier identifier : accessPolicies.getIdentifiers()) {
+     *     System.out.printf("Identifier Name: %s, Permissions %s%n",
+     *         identifier.getId(),
+     *         identifier.getAccessPolicy().getPermissions());
+     * }
+     * 
+ * * * @param leaseId The lease ID the active lease on the container must match. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -418,7 +544,23 @@ public Response getAccessPolicyWithResponse(String * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobContainerClient.setAccessPolicy#PublicAccessType-List} + * + *
+     * BlobSignedIdentifier identifier = new BlobSignedIdentifier()
+     *     .setId("name")
+     *     .setAccessPolicy(new BlobAccessPolicy()
+     *         .setStartsOn(OffsetDateTime.now())
+     *         .setExpiresOn(OffsetDateTime.now().plusDays(7))
+     *         .setPermissions("permissionString"));
+     *
+     * try {
+     *     client.setAccessPolicy(PublicAccessType.CONTAINER, Collections.singletonList(identifier));
+     *     System.out.printf("Set Access Policy completed %n");
+     * } catch (UnsupportedOperationException error) {
+     *     System.out.printf("Set Access Policy completed %s%n", error);
+     * }
+     * 
+ * * * @param accessType Specifies how the data in this container is available to the public. See the * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. @@ -441,7 +583,29 @@ public void setAccessPolicy(PublicAccessType accessType, * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerClient.setAccessPolicyWithResponse#PublicAccessType-List-BlobRequestConditions-Duration-Context}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerClient.setAccessPolicyWithResponse#PublicAccessType-List-BlobRequestConditions-Duration-Context -->
+     * <pre>
+     * BlobSignedIdentifier identifier = new BlobSignedIdentifier()
+     *     .setId("name")
+     *     .setAccessPolicy(new BlobAccessPolicy()
+     *         .setStartsOn(OffsetDateTime.now())
+     *         .setExpiresOn(OffsetDateTime.now().plusDays(7))
+     *         .setPermissions("permissionString"));
+     *
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * Context context = new Context("Key", "Value");
+     *
+     * System.out.printf("Set access policy completed with status %d%n",
+     *     client.setAccessPolicyWithResponse(PublicAccessType.CONTAINER,
+     *         Collections.singletonList(identifier),
+     *         requestConditions,
+     *         timeout,
+     *         context).getStatusCode());
+     * 
+ * * * @param accessType Specifies how the data in this container is available to the public. See the * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. @@ -477,7 +641,12 @@ public Response setAccessPolicyWithResponse(PublicAccessType accessType, * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobContainerClient.listBlobs} + * + *
+     * client.listBlobs().forEach(blob ->
+     *     System.out.printf("Name: %s, Directory? %b%n", blob.getName(), blob.isPrefix()));
+     * 
+ * * * @return The listed blobs, flattened. */ @@ -499,7 +668,22 @@ public PagedIterable listBlobs() { * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerClient.listBlobs#ListBlobsOptions-Duration}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerClient.listBlobs#ListBlobsOptions-Duration -->
+     * <pre>
+     * ListBlobsOptions options = new ListBlobsOptions()
+     *     .setPrefix("prefixToMatch")
+     *     .setDetails(new BlobListDetails()
+     *         .setRetrieveDeletedBlobs(true)
+     *         .setRetrieveSnapshots(true));
+     *
+     * client.listBlobs(options, timeout).forEach(blob ->
+     *     System.out.printf("Name: %s, Directory? %b, Deleted? %b, Snapshot ID: %s%n",
+     *         blob.getName(),
+     *         blob.isPrefix(),
+     *         blob.isDeleted(),
+     *         blob.getSnapshot()));
+     * 
+ * * * @param options {@link ListBlobsOptions}. If iterating by page, the page size passed to byPage methods such as * {@link PagedIterable#iterableByPage(int)} will be preferred over the value set on these options. @@ -524,7 +708,24 @@ public PagedIterable listBlobs(ListBlobsOptions options, Duration time * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobContainerClient.listBlobs#ListBlobsOptions-String-Duration} + * + *
+     * ListBlobsOptions options = new ListBlobsOptions()
+     *     .setPrefix("prefixToMatch")
+     *     .setDetails(new BlobListDetails()
+     *         .setRetrieveDeletedBlobs(true)
+     *         .setRetrieveSnapshots(true));
+     *
+     * String continuationToken = "continuationToken";
+     *
+     * client.listBlobs(options, continuationToken, timeout).forEach(blob ->
+     *     System.out.printf("Name: %s, Directory? %b, Deleted? %b, Snapshot ID: %s%n",
+     *         blob.getName(),
+     *         blob.isPrefix(),
+     *         blob.isDeleted(),
+     *         blob.getSnapshot()));
+     * 
+ * * * @param options {@link ListBlobsOptions}. If iterating by page, the page size passed to byPage methods such as * {@link PagedIterable#iterableByPage(int)} will be preferred over the value set on these options. @@ -563,7 +764,12 @@ public PagedIterable listBlobs(ListBlobsOptions options, String contin * *

      * <p><strong>Code Samples</strong></p>
      *
-     * {@codesnippet com.azure.storage.blob.BlobContainerClient.listBlobsByHierarchy#String}
+     * <!-- src_embed com.azure.storage.blob.BlobContainerClient.listBlobsByHierarchy#String -->
+     * <pre>
+     * client.listBlobsByHierarchy("directoryName").forEach(blob ->
+     *     System.out.printf("Name: %s, Directory? %b%n", blob.getName(), blob.isPrefix()));
+     * 
+ * * * @param directory The directory to list blobs underneath * @return A reactive response emitting the prefixes and blobs. @@ -599,7 +805,22 @@ public PagedIterable listBlobsByHierarchy(String directory) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobContainerClient.listBlobsByHierarchy#String-ListBlobsOptions-Duration} + * + *
+     * ListBlobsOptions options = new ListBlobsOptions()
+     *     .setPrefix("directoryName")
+     *     .setDetails(new BlobListDetails()
+     *         .setRetrieveDeletedBlobs(true)
+     *         .setRetrieveSnapshots(true));
+     *
+     * client.listBlobsByHierarchy("/", options, timeout).forEach(blob ->
+     *     System.out.printf("Name: %s, Directory? %b, Deleted? %b, Snapshot ID: %s%n",
+     *         blob.getName(),
+     *         blob.isPrefix(),
+     *         blob.isDeleted(),
+     *         blob.getSnapshot()));
+     * 
+ * * * @param delimiter The delimiter for blob hierarchy, "/" for hierarchy based on directories * @param options {@link ListBlobsOptions}. If iterating by page, the page size passed to byPage methods such as @@ -621,7 +842,12 @@ public PagedIterable listBlobsByHierarchy(String delimiter, ListBlobsO * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobContainerClient.getAccountInfo#Duration} + * + *
+     * StorageAccountInfo accountInfo = client.getAccountInfo(timeout);
+     * System.out.printf("Account Kind: %s, SKU: %s%n", accountInfo.getAccountKind(), accountInfo.getSkuName());
+     * 
+ * * @return The account info. */ @ServiceMethod(returns = ReturnType.SINGLE) @@ -635,7 +861,13 @@ public StorageAccountInfo getAccountInfo(Duration timeout) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobContainerClient.getAccountInfoWithResponse#Duration-Context} + * + *
+     * Context context = new Context("Key", "Value");
+     * StorageAccountInfo accountInfo = client.getAccountInfoWithResponse(timeout, context).getValue();
+     * System.out.printf("Account Kind: %s, SKU: %s%n", accountInfo.getAccountKind(), accountInfo.getSkuName());
+     * 
+ * * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. @@ -653,7 +885,8 @@ public Response getAccountInfoWithResponse(Duration timeout, // * // *

Code Samples

// * -// * {@codesnippet com.azure.storage.blob.BlobContainerClient.rename#String} +// * +// * // * // * @param destinationContainerName The new name of the container. // * @return A {@link BlobContainerClient} used to interact with the renamed container. @@ -669,7 +902,8 @@ public Response getAccountInfoWithResponse(Duration timeout, // * // *

Code Samples

// * -// * {@codesnippet com.azure.storage.blob.BlobContainerClient.renameWithResponse#BlobContainerRenameOptions-Duration-Context} +// * +// * // * // * @param options {@link BlobContainerRenameOptions} // * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -692,7 +926,17 @@ public Response getAccountInfoWithResponse(Duration timeout, * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobContainerClient.generateUserDelegationSas#BlobServiceSasSignatureValues-UserDelegationKey} + * + *
+     * OffsetDateTime myExpiryTime = OffsetDateTime.now().plusDays(1);
+     * BlobContainerSasPermission myPermission = new BlobContainerSasPermission().setReadPermission(true);
+     *
+     * BlobServiceSasSignatureValues myValues = new BlobServiceSasSignatureValues(myExpiryTime, myPermission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateUserDelegationSas(myValues, userDelegationKey);
+     * 
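+     * The userDelegationKey used above is typically fetched first from a BlobServiceClient that is authenticated
+     * with a token credential. A minimal sketch, assuming such a client named blobServiceClient (an assumed name,
+     * not part of the original sample):
+     *
+     * UserDelegationKey userDelegationKey = blobServiceClient.getUserDelegationKey(
+     *     OffsetDateTime.now(), OffsetDateTime.now().plusDays(1));
+     * 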
+ * * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. @@ -712,7 +956,17 @@ public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServic * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobContainerClient.generateUserDelegationSas#BlobServiceSasSignatureValues-UserDelegationKey-String-Context} + * + *
+     * OffsetDateTime myExpiryTime = OffsetDateTime.now().plusDays(1);
+     * BlobContainerSasPermission myPermission = new BlobContainerSasPermission().setReadPermission(true);
+     *
+     * BlobServiceSasSignatureValues myValues = new BlobServiceSasSignatureValues(myExpiryTime, myPermission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateUserDelegationSas(myValues, userDelegationKey, accountName, new Context("key", "value"));
+     * 
+ * * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. @@ -736,7 +990,17 @@ public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServic * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobContainerClient.generateSas#BlobServiceSasSignatureValues} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * BlobContainerSasPermission permission = new BlobContainerSasPermission().setReadPermission(true);
+     *
+     * BlobServiceSasSignatureValues values = new BlobServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateSas(values); // Client must be authenticated via StorageSharedKeyCredential
+     * 
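+     * A sketch, not part of the original sample, of one way the generated SAS might then be consumed: pair it with
+     * the container URL by building a new, SAS-authenticated client.
+     *
+     * String sasToken = client.generateSas(values);
+     * BlobContainerClient sasClient = new BlobContainerClientBuilder()
+     *     .endpoint(client.getBlobContainerUrl())
+     *     .sasToken(sasToken)
+     *     .buildClient();
+     * 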
+ * * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * @@ -753,7 +1017,18 @@ public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureV * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobContainerClient.generateSas#BlobServiceSasSignatureValues-Context} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * BlobContainerSasPermission permission = new BlobContainerSasPermission().setReadPermission(true);
+     *
+     * BlobServiceSasSignatureValues values = new BlobServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * client.generateSas(values, new Context("key", "value"));
+     * 
+ * * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobContainerClientBuilder.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobContainerClientBuilder.java index 1c7932727ad40..0db7b13389101 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobContainerClientBuilder.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobContainerClientBuilder.java @@ -88,7 +88,13 @@ public BlobContainerClientBuilder() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobContainerClientBuilder.buildClient} + * + *
+     * BlobContainerClient client = new BlobContainerClientBuilder()
+     *     .connectionString(connectionString)
+     *     .buildClient();
+     * 
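+     * When a connection string is not available, the builder can also be configured with an endpoint and a shared
+     * key credential. A minimal sketch; the endpoint, account name and key values are placeholders, not part of the
+     * original sample:
+     *
+     * BlobContainerClient sharedKeyClient = new BlobContainerClientBuilder()
+     *     .endpoint("https://<account-name>.blob.core.windows.net")
+     *     .credential(new StorageSharedKeyCredential("<account-name>", "<account-key>"))
+     *     .containerName("mycontainer")
+     *     .buildClient();
+     * 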
+ * * * @return a {@link BlobContainerClient} created from the configurations in this builder. * @throws IllegalStateException If multiple credentials have been specified. @@ -102,7 +108,13 @@ public BlobContainerClient buildClient() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobContainerClientBuilder.buildAsyncClient} + * + *
+     * BlobContainerAsyncClient client = new BlobContainerClientBuilder()
+     *     .connectionString(connectionString)
+     *     .buildAsyncClient();
+     * 
+ * * * @return a {@link BlobContainerAsyncClient} created from the configurations in this builder. * @throws IllegalStateException If multiple credentials have been specified. diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobServiceAsyncClient.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobServiceAsyncClient.java index 747fc45172762..a6951e7c416c2 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobServiceAsyncClient.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobServiceAsyncClient.java @@ -141,7 +141,11 @@ public final class BlobServiceAsyncClient { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceAsyncClient.getBlobContainerAsyncClient#String} + * + *
+     * BlobContainerAsyncClient blobContainerAsyncClient = client.getBlobContainerAsyncClient("containerName");
+     * 
+ * * * @param containerName The name of the container to point to. A value of null or empty string will be interpreted * as pointing to the root container and will be replaced by "$root". @@ -181,7 +185,12 @@ public BlobServiceVersion getServiceVersion() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceAsyncClient.createBlobContainer#String} + * + *
+     * BlobContainerAsyncClient blobContainerAsyncClient =
+     *     client.createBlobContainer("containerName").block();
+     * 
+ * * * @param containerName Name of the container to create * @return A {@link Mono} containing a {@link BlobContainerAsyncClient} used to interact with the container created. @@ -202,7 +211,14 @@ public Mono createBlobContainer(String containerName) * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceAsyncClient.createBlobContainerWithResponse#String-Map-PublicAccessType} + * + *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     *
+     * BlobContainerAsyncClient containerClient = client
+     *     .createBlobContainerWithResponse("containerName", metadata, PublicAccessType.CONTAINER).block().getValue();
+     * 
+ * * * @param containerName Name of the container to create * @param metadata Metadata to associate with the container. If there is leading or trailing whitespace in any @@ -238,7 +254,13 @@ Mono> createBlobContainerWithResponse(String * Docs. *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceAsyncClient.deleteBlobContainer#String} + * + *
+     * client.deleteBlobContainer("containerName").subscribe(
+     *     response -> System.out.printf("Delete container completed%n"),
+     *     error -> System.out.printf("Delete container failed: %s%n", error));
+     * 
+ * * * @param containerName Name of the container to delete * @return A {@link Mono} containing status code and HTTP headers @@ -259,7 +281,13 @@ public Mono deleteBlobContainer(String containerName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceAsyncClient.deleteBlobContainerWithResponse#String-Context} + * + *
+     * client.deleteBlobContainerWithResponse("containerName").subscribe(response ->
+     *     System.out.printf("Delete container completed with status %d%n", response.getStatusCode()));
+     * 
+ * * * @param containerName Name of the container to delete * @return A {@link Mono} containing status code and HTTP headers @@ -293,7 +321,11 @@ public String getAccountUrl() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceAsyncClient.listBlobContainers} + * + *
+     * client.listBlobContainers().subscribe(container -> System.out.printf("Name: %s%n", container.getName()));
+     * 
+ * * * @return A reactive response emitting the list of containers. */ @@ -312,7 +344,15 @@ public PagedFlux listBlobContainers() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceAsyncClient.listBlobContainers#ListBlobContainersOptions} + * + *
+     * ListBlobContainersOptions options = new ListBlobContainersOptions()
+     *     .setPrefix("containerNamePrefixToMatch")
+     *     .setDetails(new BlobContainerListDetails().setRetrieveMetadata(true));
+     *
+     * client.listBlobContainers(options).subscribe(container -> System.out.printf("Name: %s%n", container.getName()));
+     * 
+ * * * @param options A {@link ListBlobContainersOptions} which specifies what data should be returned by the service. * @return A reactive response emitting the list of containers. @@ -367,7 +407,11 @@ private Mono> listBlobContainersSegment(String * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceAsyncClient.findBlobsByTag#String} + * + *
+     * client.findBlobsByTags("where=tag=value").subscribe(blob -> System.out.printf("Name: %s%n", blob.getName()));
+     * 
+ * * * @param query Filters the results to return only blobs whose tags match the specified expression. * @return A reactive response emitting the list of blobs. @@ -383,7 +427,12 @@ public PagedFlux findBlobsByTags(String query) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobAsyncServiceClient.findBlobsByTag#FindBlobsOptions} + * + *
+     * client.findBlobsByTags(new FindBlobsOptions("where=tag=value").setMaxResultsPerPage(10))
+     *     .subscribe(blob -> System.out.printf("Name: %s%n", blob.getName()));
+     * 
+ * * * @param options {@link FindBlobsOptions} * @return A reactive response emitting the list of blobs. @@ -479,7 +528,14 @@ private List toIncludeTypes(BlobContainerListDeta * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceAsyncClient.getProperties} + * + *
+     * client.getProperties().subscribe(response ->
+     *     System.out.printf("Hour metrics enabled: %b, Minute metrics enabled: %b%n",
+     *         response.getHourMetrics().isEnabled(),
+     *         response.getMinuteMetrics().isEnabled()));
+     * 
+ * * * @return A reactive response containing the storage account properties. */ @@ -498,7 +554,14 @@ public Mono getProperties() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceAsyncClient.getPropertiesWithResponse} + * + *
+     * client.getPropertiesWithResponse().subscribe(response ->
+     *     System.out.printf("Hour metrics enabled: %b, Minute metrics enabled: %b%n",
+     *         response.getValue().getHourMetrics().isEnabled(),
+     *         response.getValue().getMinuteMetrics().isEnabled()));
+     * 
+ * * * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} contains the storage * account properties. @@ -530,7 +593,28 @@ Mono> getPropertiesWithResponse(Context context) * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceAsyncClient.setProperties#BlobServiceProperties} + * + *
+     * BlobRetentionPolicy loggingRetentionPolicy = new BlobRetentionPolicy().setEnabled(true).setDays(3);
+     * BlobRetentionPolicy metricsRetentionPolicy = new BlobRetentionPolicy().setEnabled(true).setDays(1);
+     *
+     * BlobServiceProperties properties = new BlobServiceProperties()
+     *     .setLogging(new BlobAnalyticsLogging()
+     *         .setWrite(true)
+     *         .setDelete(true)
+     *         .setRetentionPolicy(loggingRetentionPolicy))
+     *     .setHourMetrics(new BlobMetrics()
+     *         .setEnabled(true)
+     *         .setRetentionPolicy(metricsRetentionPolicy))
+     *     .setMinuteMetrics(new BlobMetrics()
+     *         .setEnabled(true)
+     *         .setRetentionPolicy(metricsRetentionPolicy));
+     *
+     * client.setProperties(properties).subscribe(
+     *     response -> System.out.printf("Setting properties completed%n"),
+     *     error -> System.out.printf("Setting properties failed: %s%n", error));
+     * 
+ * * * @param properties Configures the service. * @return A {@link Mono} signalling completion. @@ -553,7 +637,27 @@ public Mono setProperties(BlobServiceProperties properties) { * If CORS policies are set, CORS parameters that are not set default to the empty string.

*

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceAsyncClient.setPropertiesWithResponse#BlobServiceProperties} + * + *
+     * BlobRetentionPolicy loggingRetentionPolicy = new BlobRetentionPolicy().setEnabled(true).setDays(3);
+     * BlobRetentionPolicy metricsRetentionPolicy = new BlobRetentionPolicy().setEnabled(true).setDays(1);
+     *
+     * BlobServiceProperties properties = new BlobServiceProperties()
+     *     .setLogging(new BlobAnalyticsLogging()
+     *         .setWrite(true)
+     *         .setDelete(true)
+     *         .setRetentionPolicy(loggingRetentionPolicy))
+     *     .setHourMetrics(new BlobMetrics()
+     *         .setEnabled(true)
+     *         .setRetentionPolicy(metricsRetentionPolicy))
+     *     .setMinuteMetrics(new BlobMetrics()
+     *         .setEnabled(true)
+     *         .setRetentionPolicy(metricsRetentionPolicy));
+     *
+     * client.setPropertiesWithResponse(properties).subscribe(response ->
+     *     System.out.printf("Setting properties completed with status %d%n", response.getStatusCode()));
+     * 
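+     * The CORS behavior described above is configured through the same properties object. A minimal sketch; the
+     * rule values are assumptions, not part of the original sample:
+     *
+     * properties.setCors(Collections.singletonList(new BlobCorsRule()
+     *     .setAllowedOrigins("*")
+     *     .setAllowedMethods("GET,PUT")
+     *     .setAllowedHeaders("x-ms-version")
+     *     .setMaxAgeInSeconds(10)));
+     * 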
+ * * * @param properties Configures the service. * @return A {@link Mono} containing a {@link Response} signalling completion. @@ -670,7 +774,12 @@ private void validateRetentionPolicy(BlobRetentionPolicy retentionPolicy, String * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceAsyncClient.getUserDelegationKey#OffsetDateTime-OffsetDateTime} + * + *
+     * client.getUserDelegationKey(delegationKeyStartTime, delegationKeyExpiryTime).subscribe(response ->
+     *     System.out.printf("User delegation key: %s%n", response.getValue()));
+     * 
+ * * * @param start Start time for the key's validity. Null indicates immediate start. * @param expiry Expiration of the key's validity. @@ -694,7 +803,12 @@ public Mono getUserDelegationKey(OffsetDateTime start, Offset * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceAsyncClient.getUserDelegationKeyWithResponse#OffsetDateTime-OffsetDateTime} + * + *
+     * client.getUserDelegationKeyWithResponse(delegationKeyStartTime, delegationKeyExpiryTime).subscribe(response ->
+     *     System.out.printf("User delegation key: %s%n", response.getValue().getValue()));
+     * 
+ * * * @param start Start time for the key's validity. Null indicates immediate start. * @param expiry Expiration of the key's validity. @@ -739,7 +853,12 @@ Mono> getUserDelegationKeyWithResponse(OffsetDateTim * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceAsyncClient.getStatistics} + * + *
+     * client.getStatistics().subscribe(response ->
+     *     System.out.printf("Geo-replication status: %s%n", response.getGeoReplication().getStatus()));
+     * 
+ * * * @return A {@link Mono} containing the storage account statistics. */ @@ -760,7 +879,12 @@ public Mono getStatistics() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceAsyncClient.getStatisticsWithResponse} + * + *
+     * client.getStatisticsWithResponse().subscribe(response ->
+     *     System.out.printf("Geo-replication status: %s%n", response.getValue().getGeoReplication().getStatus()));
+     * 
+ * * * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} containing the * storage account statistics. @@ -788,7 +912,12 @@ Mono> getStatisticsWithResponse(Context context) * Azure Docs. *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceAsyncClient.getAccountInfo} + * + *
+     * client.getAccountInfo().subscribe(response ->
+     *     System.out.printf("Account kind: %s, SKU: %s%n", response.getAccountKind(), response.getSkuName()));
+     * 
+ * * * @return A {@link Mono} containing the storage account info. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono getAccountInfo() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceAsyncClient.getAccountInfoWithResponse} + * + *
+     * client.getAccountInfoWithResponse().subscribe(response ->
+     *     System.out.printf("Account kind: %s, SKU: %s%n", response.getValue().getAccountKind(),
+     *         response.getValue().getSkuName()));
+     * 
+ * * * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} the storage account * info. @@ -846,7 +981,22 @@ public String getAccountName() { * *

The snippet below generates a SAS that lasts for two days and gives the user read and list access to blob * containers and file shares.

- * {@codesnippet com.azure.storage.blob.BlobServiceAsyncClient.generateAccountSas#AccountSasSignatureValues} + * + *
+     * AccountSasPermission permissions = new AccountSasPermission()
+     *     .setListPermission(true)
+     *     .setReadPermission(true);
+     * AccountSasResourceType resourceTypes = new AccountSasResourceType().setContainer(true);
+     * AccountSasService services = new AccountSasService().setBlobAccess(true).setFileAccess(true);
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plus(Duration.ofDays(2));
+     *
+     * AccountSasSignatureValues sasValues =
+     *     new AccountSasSignatureValues(expiryTime, permissions, services, resourceTypes);
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * String sas = client.generateAccountSas(sasValues);
+     * 
+ * * * @param accountSasSignatureValues {@link AccountSasSignatureValues} * @@ -863,7 +1013,22 @@ public String generateAccountSas(AccountSasSignatureValues accountSasSignatureVa * *

The snippet below generates a SAS that lasts for two days and gives the user read and list access to blob * containers and file shares.

- * {@codesnippet com.azure.storage.blob.BlobServiceAsyncClient.generateAccountSas#AccountSasSignatureValues-Context} + * + *
+     * AccountSasPermission permissions = new AccountSasPermission()
+     *     .setListPermission(true)
+     *     .setReadPermission(true);
+     * AccountSasResourceType resourceTypes = new AccountSasResourceType().setContainer(true);
+     * AccountSasService services = new AccountSasService().setBlobAccess(true).setFileAccess(true);
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plus(Duration.ofDays(2));
+     *
+     * AccountSasSignatureValues sasValues =
+     *     new AccountSasSignatureValues(expiryTime, permissions, services, resourceTypes);
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * String sas = client.generateAccountSas(sasValues, new Context("key", "value"));
+     * 
+ * * * @param accountSasSignatureValues {@link AccountSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. @@ -895,7 +1060,19 @@ private void throwOnAnonymousAccess() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceAsyncClient.undeleteBlobContainer#String-String} + * + *
+     * ListBlobContainersOptions listBlobContainersOptions = new ListBlobContainersOptions();
+     * listBlobContainersOptions.getDetails().setRetrieveDeleted(true);
+     * client.listBlobContainers(listBlobContainersOptions).flatMap(
+     *     deletedContainer -> {
+     *         Mono<BlobContainerAsyncClient> blobContainerClient = client.undeleteBlobContainer(
+     *             deletedContainer.getName(), deletedContainer.getVersion());
+     *         return blobContainerClient;
+     *     }
+     * ).then().block();
+     * 
+ * * * @param deletedContainerName The name of the previously deleted container. * @param deletedContainerVersion The version of the previously deleted container. @@ -921,7 +1098,20 @@ public Mono undeleteBlobContainer( * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceAsyncClient.undeleteBlobContainerWithResponse#UndeleteBlobContainerOptions} + * + *
+     * ListBlobContainersOptions listBlobContainersOptions = new ListBlobContainersOptions();
+     * listBlobContainersOptions.getDetails().setRetrieveDeleted(true);
+     * client.listBlobContainers(listBlobContainersOptions).flatMap(
+     *     deletedContainer -> {
+     *         Mono<BlobContainerAsyncClient> blobContainerClient = client.undeleteBlobContainerWithResponse(
+     *             new UndeleteBlobContainerOptions(deletedContainer.getName(), deletedContainer.getVersion()))
+     *             .map(Response::getValue);
+     *         return blobContainerClient;
+     *     }
+     * ).then().block();
+     * 
+ * * * @param options {@link UndeleteBlobContainerOptions}. * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} contains a {@link @@ -957,7 +1147,8 @@ Mono> undeleteBlobContainerWithResponse( // * // *

Code Samples

// * -// * {@codesnippet com.azure.storage.blob.BlobServiceAsyncClient.renameBlobContainer#String-String} +// * +// * // * // * @param sourceContainerName The current name of the container. // * @param destinationContainerName The new name of the container. @@ -975,7 +1166,8 @@ Mono> undeleteBlobContainerWithResponse( // * // *

Code Samples

// * -// * {@codesnippet com.azure.storage.blob.BlobServiceAsyncClient.renameBlobContainerWithResponse#String-BlobContainerRenameOptions} +// * +// * // * // * @param sourceContainerName The current name of the container. // * @param options {@link BlobContainerRenameOptions} diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobServiceClient.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobServiceClient.java index 7f15e0b63ee41..0a2e272e92e6a 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobServiceClient.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/BlobServiceClient.java @@ -63,7 +63,11 @@ public final class BlobServiceClient { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceClient.getBlobContainerClient#String} + * + *
+     * BlobContainerClient blobContainerClient = client.getBlobContainerClient("containerName");
+     * 
+ * * * @param containerName The name of the container to point to. * @return A {@link BlobContainerClient} object pointing to the specified container @@ -97,7 +101,11 @@ public BlobServiceVersion getServiceVersion() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceClient.createBlobContainer#String} + * + *
+     * BlobContainerClient blobContainerClient = client.createBlobContainer("containerName");
+     * 
+ * * * @param containerName Name of the container to create * @return The {@link BlobContainerClient} used to interact with the container created. @@ -114,7 +122,18 @@ public BlobContainerClient createBlobContainer(String containerName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceClient.createBlobContainerWithResponse#String-Map-PublicAccessType-Context} + * + *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * Context context = new Context("Key", "Value");
+     *
+     * BlobContainerClient blobContainerClient = client.createBlobContainerWithResponse(
+     *     "containerName",
+     *     metadata,
+     *     PublicAccessType.CONTAINER,
+     *     context).getValue();
+     * 
+ * * * @param containerName Name of the container to create * @param metadata Metadata to associate with the container. If there is leading or trailing whitespace in any @@ -139,7 +158,16 @@ public Response createBlobContainerWithResponse(String cont * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceClient.deleteBlobContainer#String} + * + *
+     * try {
+     *     client.deleteBlobContainer("containerName");
+     *     System.out.printf("Delete container completed%n");
+     * } catch (BlobStorageException error) {
+     *     System.out.printf("Delete container failed: %s%n", error);
+     * }
+     * 
+ * * * @param containerName Name of the container to delete */ @@ -178,7 +206,11 @@ public String getAccountUrl() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceClient.listBlobContainers} + * + *
+     * client.listBlobContainers().forEach(container -> System.out.printf("Name: %s%n", container.getName()));
+     * 
+ * * * @return The list of containers. */ @@ -194,7 +226,15 @@ public PagedIterable listBlobContainers() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceClient.listBlobContainers#ListBlobContainersOptions-Duration} + * + *
+     * ListBlobContainersOptions options = new ListBlobContainersOptions()
+     *     .setPrefix("containerNamePrefixToMatch")
+     *     .setDetails(new BlobContainerListDetails().setRetrieveMetadata(true));
+     *
+     * client.listBlobContainers(options, timeout).forEach(container ->
+     *     System.out.printf("Name: %s%n", container.getName()));
+     * 
+ * * * @param options A {@link ListBlobContainersOptions} which specifies what data should be returned by the service. * If iterating by page, the page size passed to byPage methods such as @@ -214,7 +254,11 @@ public PagedIterable listBlobContainers(ListBlobContainersOpt * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceClient.findBlobsByTag#String} + * + *
+     * client.findBlobsByTags("where=tag=value").forEach(blob -> System.out.printf("Name: %s%n", blob.getName()));
+     * 
+ * * * @param query Filters the results to return only blobs whose tags match the specified expression. * @return The list of blobs. @@ -231,7 +275,13 @@ public PagedIterable findBlobsByTags(String query) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceClient.findBlobsByTag#FindBlobsOptions-Duration} + * + *
+     * Context context = new Context("Key", "Value");
+     * client.findBlobsByTags(new FindBlobsOptions("where=tag=value").setMaxResultsPerPage(10), timeout, context)
+     *     .forEach(blob -> System.out.printf("Name: %s%n", blob.getName()));
+     * 
+ * * * @param options {@link FindBlobsOptions}. If iterating by page, the page size passed to byPage methods such as * {@link PagedIterable#iterableByPage(int)} will be preferred over the value set on these options. @@ -250,7 +300,15 @@ public PagedIterable findBlobsByTags(FindBlobsOptions options, D * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceClient.getProperties} + * + *
+     * BlobServiceProperties properties = client.getProperties();
+     *
+     * System.out.printf("Hour metrics enabled: %b, Minute metrics enabled: %b%n",
+     *     properties.getHourMetrics().isEnabled(),
+     *     properties.getMinuteMetrics().isEnabled());
+     * 
+ * * * @return The storage account properties. */ @@ -265,7 +323,16 @@ public BlobServiceProperties getProperties() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceClient.getPropertiesWithResponse#Duration-Context} + * + *
+     * Context context = new Context("Key", "Value");
+     * BlobServiceProperties properties = client.getPropertiesWithResponse(timeout, context).getValue();
+     *
+     * System.out.printf("Hour metrics enabled: %b, Minute metrics enabled: %b%n",
+     *     properties.getHourMetrics().isEnabled(),
+     *     properties.getMinuteMetrics().isEnabled());
+     * 
+ * * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. @@ -289,7 +356,31 @@ public Response getPropertiesWithResponse(Duration timeou * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceClient.setProperties#BlobServiceProperties} + * + *
+     * BlobRetentionPolicy loggingRetentionPolicy = new BlobRetentionPolicy().setEnabled(true).setDays(3);
+     * BlobRetentionPolicy metricsRetentionPolicy = new BlobRetentionPolicy().setEnabled(true).setDays(1);
+     *
+     * BlobServiceProperties properties = new BlobServiceProperties()
+     *     .setLogging(new BlobAnalyticsLogging()
+     *         .setWrite(true)
+     *         .setDelete(true)
+     *         .setRetentionPolicy(loggingRetentionPolicy))
+     *     .setHourMetrics(new BlobMetrics()
+     *         .setEnabled(true)
+     *         .setRetentionPolicy(metricsRetentionPolicy))
+     *     .setMinuteMetrics(new BlobMetrics()
+     *         .setEnabled(true)
+     *         .setRetentionPolicy(metricsRetentionPolicy));
+     *
+     * try {
+     *     client.setProperties(properties);
+     *     System.out.printf("Setting properties completed%n");
+     * } catch (BlobStorageException error) {
+     *     System.out.printf("Setting properties failed: %s%n", error);
+     * }
+     * 
+ * * * @param properties Configures the service. */ @@ -308,7 +399,29 @@ public void setProperties(BlobServiceProperties properties) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceClient.setPropertiesWithResponse#BlobServiceProperties-Duration-Context} + * + *
+     * BlobRetentionPolicy loggingRetentionPolicy = new BlobRetentionPolicy().setEnabled(true).setDays(3);
+     * BlobRetentionPolicy metricsRetentionPolicy = new BlobRetentionPolicy().setEnabled(true).setDays(1);
+     *
+     * BlobServiceProperties properties = new BlobServiceProperties()
+     *     .setLogging(new BlobAnalyticsLogging()
+     *         .setWrite(true)
+     *         .setDelete(true)
+     *         .setRetentionPolicy(loggingRetentionPolicy))
+     *     .setHourMetrics(new BlobMetrics()
+     *         .setEnabled(true)
+     *         .setRetentionPolicy(metricsRetentionPolicy))
+     *     .setMinuteMetrics(new BlobMetrics()
+     *         .setEnabled(true)
+     *         .setRetentionPolicy(metricsRetentionPolicy));
+     *
+     * Context context = new Context("Key", "Value");
+     *
+     * System.out.printf("Setting properties completed with status %d%n",
+     *     client.setPropertiesWithResponse(properties, timeout, context).getStatusCode());
+     * 
+ * * * @param properties Configures the service. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -329,7 +442,12 @@ public Response setPropertiesWithResponse(BlobServiceProperties properties * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceClient.getUserDelegationKey#OffsetDateTime-OffsetDateTime} + * + *
+     * System.out.printf("User delegation key: %s%n",
+     *     client.getUserDelegationKey(delegationKeyStartTime, delegationKeyExpiryTime));
+     * 
+ * * * @param start Start time for the key's validity. Null indicates immediate start. * @param expiry Expiration of the key's validity. @@ -346,7 +464,12 @@ public UserDelegationKey getUserDelegationKey(OffsetDateTime start, OffsetDateTi * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceClient.getUserDelegationKeyWithResponse#OffsetDateTime-OffsetDateTime-Duration-Context} + * + *
+     * System.out.printf("User delegation key: %s%n",
+     *     client.getUserDelegationKeyWithResponse(delegationKeyStartTime, delegationKeyExpiryTime, timeout, context));
+     * 
+ * * * @param start Start time for the key's validity. Null indicates immediate start. * @param expiry Expiration of the key's validity. @@ -371,7 +494,12 @@ public Response getUserDelegationKeyWithResponse(OffsetDateTi * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceClient.getStatistics} + * + *
+     * System.out.printf("Geo-replication status: %s%n",
+     *     client.getStatistics().getGeoReplication().getStatus());
+     * 
+ * * * @return The storage account statistics. */ @@ -388,7 +516,12 @@ public BlobServiceStatistics getStatistics() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceClient.getStatisticsWithResponse#Duration-Context} + * + *
+     * System.out.printf("Geo-replication status: %s%n",
+     *     client.getStatisticsWithResponse(timeout, context).getValue().getGeoReplication().getStatus());
+     * 
+ * * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. @@ -407,7 +540,13 @@ public Response getStatisticsWithResponse(Duration timeou * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceClient.getAccountInfo} + * + *
+     * StorageAccountInfo accountInfo = client.getAccountInfo();
+     *
+     * System.out.printf("Account kind: %s, SKU: %s%n", accountInfo.getAccountKind(), accountInfo.getSkuName());
+     * 
+ * * * @return The storage account info. */ @@ -448,7 +587,22 @@ public String getAccountName() { *

Generating an account SAS

*

The snippet below generates an AccountSasSignatureValues object that lasts for two days and gives the user * read and list access to blob and file shares.

- * {@codesnippet com.azure.storage.blob.BlobServiceClient.generateAccountSas#AccountSasSignatureValues} + * + *
+     * AccountSasPermission permissions = new AccountSasPermission()
+     *     .setListPermission(true)
+     *     .setReadPermission(true);
+     * AccountSasResourceType resourceTypes = new AccountSasResourceType().setContainer(true);
+     * AccountSasService services = new AccountSasService().setBlobAccess(true).setFileAccess(true);
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plus(Duration.ofDays(2));
+     *
+     * AccountSasSignatureValues sasValues =
+     *     new AccountSasSignatureValues(expiryTime, permissions, services, resourceTypes);
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * String sas = client.generateAccountSas(sasValues);
+     * 
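+     * A sketch, not part of the original sample, of one way the account SAS might then be consumed: build a new,
+     * SAS-authenticated service client.
+     *
+     * BlobServiceClient sasClient = new BlobServiceClientBuilder()
+     *     .endpoint(client.getAccountUrl())
+     *     .sasToken(sas)
+     *     .buildClient();
+     * 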
+ * * * @param accountSasSignatureValues {@link AccountSasSignatureValues} * @@ -467,7 +621,22 @@ public String generateAccountSas(AccountSasSignatureValues accountSasSignatureVa *

Generating an account SAS

*

The snippet below generates an AccountSasSignatureValues object that lasts for two days and gives the user * read and list access to blob and file shares.

- * {@codesnippet com.azure.storage.blob.BlobServiceClient.generateAccountSas#AccountSasSignatureValues-Context} + * + *
+     * AccountSasPermission permissions = new AccountSasPermission()
+     *     .setListPermission(true)
+     *     .setReadPermission(true);
+     * AccountSasResourceType resourceTypes = new AccountSasResourceType().setContainer(true);
+     * AccountSasService services = new AccountSasService().setBlobAccess(true).setFileAccess(true);
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plus(Duration.ofDays(2));
+     *
+     * AccountSasSignatureValues sasValues =
+     *     new AccountSasSignatureValues(expiryTime, permissions, services, resourceTypes);
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * String sas = client.generateAccountSas(sasValues, new Context("key", "value"));
+     * 
+ * * * @param accountSasSignatureValues {@link AccountSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. @@ -487,7 +656,18 @@ public String generateAccountSas(AccountSasSignatureValues accountSasSignatureVa * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceClient.undeleteBlobContainer#String-String} + * + *
+     * ListBlobContainersOptions listBlobContainersOptions = new ListBlobContainersOptions();
+     * listBlobContainersOptions.getDetails().setRetrieveDeleted(true);
+     * client.listBlobContainers(listBlobContainersOptions, null).forEach(
+     *     deletedContainer -> {
+     *         BlobContainerClient blobContainerClient = client.undeleteBlobContainer(
+     *             deletedContainer.getName(), deletedContainer.getVersion());
+     *     }
+     * );
+     * 
+ * * * @param deletedContainerName The name of the previously deleted container. * @param deletedContainerVersion The version of the previously deleted container. @@ -511,7 +691,19 @@ public BlobContainerClient undeleteBlobContainer(String deletedContainerName, St * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobServiceClient.undeleteBlobContainerWithResponse#UndeleteBlobContainerOptions-Duration-Context} + * + *
+     * ListBlobContainersOptions listBlobContainersOptions = new ListBlobContainersOptions();
+     * listBlobContainersOptions.getDetails().setRetrieveDeleted(true);
+     * client.listBlobContainers(listBlobContainersOptions, null).forEach(
+     *     deletedContainer -> {
+     *         BlobContainerClient blobContainerClient = client.undeleteBlobContainerWithResponse(
+     *             new UndeleteBlobContainerOptions(deletedContainer.getName(), deletedContainer.getVersion()),
+     *             timeout, context).getValue();
+     *     }
+     * );
+     * 
+ * * * @param options {@link UndeleteBlobContainerOptions}. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -534,7 +726,8 @@ public Response undeleteBlobContainerWithResponse( // * // *

Code Samples

// * -// * {@codesnippet com.azure.storage.blob.BlobServiceClient.renameBlobContainer#String-String} +// * +// * // * // * @param sourceContainerName The current name of the container. // * @param destinationContainerName The new name of the container. @@ -551,7 +744,8 @@ public Response undeleteBlobContainerWithResponse( // * // *

Code Samples

// * -// * {@codesnippet com.azure.storage.blob.BlobServiceClient.renameBlobContainerWithResponse#String-BlobContainerRenameOptions-Duration-Context} +// * +// * // * // * @param options {@link BlobContainerRenameOptions} // * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/AppendBlobAsyncClient.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/AppendBlobAsyncClient.java index cefa209cdfd64..7f8ba61608f6a 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/AppendBlobAsyncClient.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/AppendBlobAsyncClient.java @@ -147,7 +147,12 @@ public AppendBlobAsyncClient getCustomerProvidedKeyAsyncClient(CustomerProvidedK * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.AppendBlobAsyncClient.create} + * + *
+     * client.create().subscribe(response ->
+     *     System.out.printf("Created AppendBlob at %s%n", response.getLastModified()));
+     * 
+ * * * @return A {@link Mono} containing the information of the created appended blob. */ @@ -165,7 +170,13 @@ public Mono create() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.AppendBlobAsyncClient.create#boolean} + * + *
+     * boolean overwrite = false; // Default behavior
+     * client.create(overwrite).subscribe(response ->
+     *     System.out.printf("Created AppendBlob at %s%n", response.getLastModified()));
+     * 
+ * * * @param overwrite Whether or not to overwrite, should data exist on the blob. * @@ -191,7 +202,19 @@ public Mono create(boolean overwrite) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.AppendBlobAsyncClient.createWithResponse#BlobHttpHeaders-Map-BlobRequestConditions} + * + *
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentType("binary")
+     *     .setContentLanguage("en-US");
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.createWithResponse(headers, metadata, requestConditions).subscribe(response ->
+     *     System.out.printf("Created AppendBlob at %s%n", response.getValue().getLastModified()));
+     * 
+ * * * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any @@ -214,7 +237,21 @@ public Mono> createWithResponse(BlobHttpHeaders headers * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.AppendBlobAsyncClient.createWithResponse#AppendBlobCreateOptions} + * + *
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentType("binary")
+     *     .setContentLanguage("en-US");
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * Map<String, String> tags = Collections.singletonMap("tag", "value");
+     * BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.createWithResponse(new AppendBlobCreateOptions().setHeaders(headers).setMetadata(metadata)
+     *     .setTags(tags).setRequestConditions(requestConditions)).subscribe(response ->
+     *     System.out.printf("Created AppendBlob at %s%n", response.getValue().getLastModified()));
+     * 
+ * * * @param options {@link AppendBlobCreateOptions} * @return A {@link Mono} containing {@link Response} whose {@link Response#getValue() value} contains the created @@ -262,7 +299,12 @@ Mono> createWithResponse(AppendBlobCreateOptions option * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlock#Flux-long} + * + *
+     * client.appendBlock(data, length).subscribe(response ->
+     *     System.out.printf("AppendBlob has %d committed blocks%n", response.getBlobCommittedBlockCount()));
+     * 
+ * * * @param data The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled * (the default). In other words, the Flux must produce the same data each time it is subscribed to. @@ -287,7 +329,17 @@ public Mono appendBlock(Flux data, long length) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockWithResponse#Flux-long-byte-AppendBlobRequestConditions} + * + *
+     * byte[] md5 = MessageDigest.getInstance("MD5").digest("data".getBytes(StandardCharsets.UTF_8));
+     * AppendBlobRequestConditions requestConditions = new AppendBlobRequestConditions()
+     *     .setAppendPosition(POSITION)
+     *     .setMaxSize(maxSize);
+     *
+     * client.appendBlockWithResponse(data, length, md5, requestConditions).subscribe(response ->
+     *     System.out.printf("AppendBlob has %d committed blocks%n", response.getValue().getBlobCommittedBlockCount()));
+     * 
+ * * * @param data The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled * (the default). In other words, the Flux must produce the same data each time it is subscribed to. @@ -339,7 +391,12 @@ Mono> appendBlockWithResponse(Flux data, lo * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrl#String-BlobRange} + * + *
+     * client.appendBlockFromUrl(sourceUrl, new BlobRange(offset, count)).subscribe(response ->
+     *     System.out.printf("AppendBlob has %d committed blocks%n", response.getBlobCommittedBlockCount()));
+     * 
+ * * * @param sourceUrl The url to the blob that will be the source of the copy. A source blob in the same storage * account can be authenticated via Shared Key. However, if the source is a blob in another account, the source blob @@ -362,7 +419,20 @@ public Mono appendBlockFromUrl(String sourceUrl, BlobRange sourc * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrlWithResponse#String-BlobRange-byte-AppendBlobRequestConditions-BlobRequestConditions} + * + *
+     * AppendBlobRequestConditions appendBlobRequestConditions = new AppendBlobRequestConditions()
+     *     .setAppendPosition(POSITION)
+     *     .setMaxSize(maxSize);
+     *
+     * BlobRequestConditions modifiedRequestConditions = new BlobRequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.appendBlockFromUrlWithResponse(sourceUrl, new BlobRange(offset, count), null,
+     *     appendBlobRequestConditions, modifiedRequestConditions).subscribe(response ->
+     *     System.out.printf("AppendBlob has %d committed blocks%n", response.getValue().getBlobCommittedBlockCount()));
+     * 
+ * * * @param sourceUrl The url to the blob that will be the source of the copy. A source blob in the same storage * account can be authenticated via Shared Key. However, if the source is a blob in another account, the source blob @@ -395,7 +465,22 @@ public Mono> appendBlockFromUrlWithResponse(String sour * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.AppendBlobAsyncClient.appendBlockFromUrlWithResponse#AppendBlobAppendBlockFromUrlOptions} + * + *
+     * AppendBlobRequestConditions appendBlobRequestConditions = new AppendBlobRequestConditions()
+     *     .setAppendPosition(POSITION)
+     *     .setMaxSize(maxSize);
+     *
+     * BlobRequestConditions modifiedRequestConditions = new BlobRequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.appendBlockFromUrlWithResponse(new AppendBlobAppendBlockFromUrlOptions(sourceUrl)
+     *     .setSourceRange(new BlobRange(offset, count))
+     *     .setDestinationRequestConditions(appendBlobRequestConditions)
+     *     .setSourceRequestConditions(modifiedRequestConditions)).subscribe(response ->
+     *     System.out.printf("AppendBlob has %d committed blocks%n", response.getValue().getBlobCommittedBlockCount()));
+     * 
+ * * * @param options Parameters for the operation. * @return A {@link Mono} containing {@link Response} whose {@link Response#getValue() value} contains the append @@ -450,7 +535,11 @@ Mono> appendBlockFromUrlWithResponse(AppendBlobAppendBl * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.AppendBlobAsyncClient.seal} + * + *
+     * client.seal().subscribe(response -> System.out.println("Sealed AppendBlob"));
+     * 
+ * * * @return A reactive response signalling completion. */ @@ -469,7 +558,15 @@ public Mono seal() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.AppendBlobAsyncClient.sealWithResponse#AppendBlobSealOptions} + * + *
+     * AppendBlobRequestConditions requestConditions = new AppendBlobRequestConditions().setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.sealWithResponse(new AppendBlobSealOptions().setRequestConditions(requestConditions))
+     *     .subscribe(response -> System.out.println("Sealed AppendBlob"));
+     * 
+ * * * @param options {@link AppendBlobSealOptions} * @return A reactive response signalling completion. diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/AppendBlobClient.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/AppendBlobClient.java index 0f035b026c090..64f71c4a2f7aa 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/AppendBlobClient.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/AppendBlobClient.java @@ -128,7 +128,11 @@ public BlobOutputStream getBlobOutputStream(AppendBlobRequestConditions requestC * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.AppendBlobClient.create} + * + *
+     * System.out.printf("Created AppendBlob at %s%n", client.create().getLastModified());
+     * 
+ * * * @return The information of the created appended blob. */ @@ -142,7 +146,12 @@ public AppendBlobItem create() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.AppendBlobClient.create#boolean} + * + *
+     * boolean overwrite = false; // Default value
+     * System.out.printf("Created AppendBlob at %s%n", client.create(overwrite).getLastModified());
+     * 
+ * * * @param overwrite Whether or not to overwrite, should data exist on the blob. * @@ -164,7 +173,22 @@ public AppendBlobItem create(boolean overwrite) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.AppendBlobClient.createWithResponse#BlobHttpHeaders-Map-BlobRequestConditions-Duration-Context} + * + *
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentType("binary")
+     *     .setContentLanguage("en-US");
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Context context = new Context("key", "value");
+     *
+     * System.out.printf("Created AppendBlob at %s%n",
+     *     client.createWithResponse(headers, metadata, requestConditions, timeout, context).getValue()
+     *         .getLastModified());
+     * 
+ * * * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any @@ -188,7 +212,23 @@ public Response createWithResponse(BlobHttpHeaders headers, Map< * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.AppendBlobClient.createWithResponse#AppendBlobCreateOptions-Duration-Context} + * + *
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentType("binary")
+     *     .setContentLanguage("en-US");
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * Map<String, String> tags = Collections.singletonMap("tags", "value");
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Context context = new Context("key", "value");
+     *
+     * System.out.printf("Created AppendBlob at %s%n",
+     *     client.createWithResponse(new AppendBlobCreateOptions().setHeaders(headers).setMetadata(metadata)
+     *         .setTags(tags).setRequestConditions(requestConditions), timeout, context).getValue()
+     *         .getLastModified());
+     * 
+ * * * @param options {@link AppendBlobCreateOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -210,7 +250,12 @@ public Response createWithResponse(AppendBlobCreateOptions optio * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.AppendBlobClient.appendBlock#InputStream-long} + * + *
+     * System.out.printf("AppendBlob has %d committed blocks%n",
+     *     client.appendBlock(data, length).getBlobCommittedBlockCount());
+     * 
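+     * For data that is not markable, the parameter docs below point to getBlobOutputStream(). A minimal sketch; the
+     * payload is an assumed value, not part of the original sample:
+     *
+     * BlobOutputStream blobOutputStream = client.getBlobOutputStream();
+     * blobOutputStream.write("data".getBytes(StandardCharsets.UTF_8));
+     * blobOutputStream.close();
+     * 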
+ * * * @param data The data to write to the blob. The data must be markable. This is in order to support retries. If * the data is not markable, consider using {@link #getBlobOutputStream()} and writing to the returned OutputStream. @@ -232,7 +277,19 @@ public AppendBlobItem appendBlock(InputStream data, long length) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.AppendBlobClient.appendBlockWithResponse#InputStream-long-byte-AppendBlobRequestConditions-Duration-Context} + * + *
+     * byte[] md5 = MessageDigest.getInstance("MD5").digest("data".getBytes(StandardCharsets.UTF_8));
+     * AppendBlobRequestConditions requestConditions = new AppendBlobRequestConditions()
+     *     .setAppendPosition(POSITION)
+     *     .setMaxSize(maxSize);
+     * Context context = new Context("key", "value");
+     *
+     * System.out.printf("AppendBlob has %d committed blocks%n",
+     *     client.appendBlockWithResponse(data, length, md5, requestConditions, timeout, context)
+     *         .getValue().getBlobCommittedBlockCount());
+     * 
+ * * * @param data The data to write to the blob. The data must be markable. This is in order to support retries. If * the data is not markable, consider using {@link #getBlobOutputStream()} and writing to the returned OutputStream. @@ -265,7 +322,12 @@ public Response appendBlockWithResponse(InputStream data, long l * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.AppendBlobClient.appendBlockFromUrl#String-BlobRange} + * + *
+     * System.out.printf("AppendBlob has %d committed blocks%n",
+     *     client.appendBlockFromUrl(sourceUrl, new BlobRange(offset, count)).getBlobCommittedBlockCount());
+     * 
+ * * * @param sourceUrl The url to the blob that will be the source of the copy. A source blob in the same storage * account can be authenticated via Shared Key. However, if the source is a blob in another account, the source blob @@ -284,7 +346,23 @@ public AppendBlobItem appendBlockFromUrl(String sourceUrl, BlobRange sourceRange * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.AppendBlobClient.appendBlockFromUrlWithResponse#String-BlobRange-byte-AppendBlobRequestConditions-BlobRequestConditions-Duration-Context} + * + *
+     * AppendBlobRequestConditions appendBlobRequestConditions = new AppendBlobRequestConditions()
+     *     .setAppendPosition(POSITION)
+     *     .setMaxSize(maxSize);
+     *
+     * BlobRequestConditions modifiedRequestConditions = new BlobRequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * Context context = new Context("key", "value");
+     *
+     * System.out.printf("AppendBlob has %d committed blocks%n",
+     *     client.appendBlockFromUrlWithResponse(sourceUrl, new BlobRange(offset, count), null,
+     *         appendBlobRequestConditions, modifiedRequestConditions, timeout,
+     *         context).getValue().getBlobCommittedBlockCount());
+     * 
+ * * * @param sourceUrl The url to the blob that will be the source of the copy. A source blob in the same storage * account can be authenticated via Shared Key. However, if the source is a blob in another account, the source blob @@ -315,7 +393,25 @@ public Response appendBlockFromUrlWithResponse(String sourceUrl, * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.AppendBlobClient.appendBlockFromUrlWithResponse#AppendBlobAppendBlockFromUrlOptions-Duration-Context} + * + *
+     * AppendBlobRequestConditions appendBlobRequestConditions = new AppendBlobRequestConditions()
+     *     .setAppendPosition(POSITION)
+     *     .setMaxSize(maxSize);
+     *
+     * BlobRequestConditions modifiedRequestConditions = new BlobRequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * Context context = new Context("key", "value");
+     *
+     * System.out.printf("AppendBlob has %d committed blocks%n",
+     *     client.appendBlockFromUrlWithResponse(new AppendBlobAppendBlockFromUrlOptions(sourceUrl)
+     *         .setSourceRange(new BlobRange(offset, count))
+     *         .setDestinationRequestConditions(appendBlobRequestConditions)
+     *         .setSourceRequestConditions(modifiedRequestConditions), timeout,
+     *         context).getValue().getBlobCommittedBlockCount());
+     * 
+ * * * @param options options for the operation * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -335,7 +431,12 @@ public Response appendBlockFromUrlWithResponse(AppendBlobAppendB * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.AppendBlobClient.seal} + * + *
+     * client.seal();
+     * System.out.println("Sealed AppendBlob");
+     * 
+ * */ @ServiceMethod(returns = ReturnType.SINGLE) public void seal() { @@ -347,7 +448,16 @@ public void seal() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.AppendBlobClient.sealWithResponse#AppendBlobSealOptions-Duration-Context} + * + *
+     * AppendBlobRequestConditions requestConditions = new AppendBlobRequestConditions().setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Context context = new Context("key", "value");
+     *
+     * client.sealWithResponse(new AppendBlobSealOptions().setRequestConditions(requestConditions), timeout, context);
+     * System.out.println("Sealed AppendBlob");
+     * 
+ * * * @param options {@link AppendBlobSealOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlobAsyncClientBase.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlobAsyncClientBase.java index 09c527614bd2d..450e71940eafa 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlobAsyncClientBase.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlobAsyncClientBase.java @@ -329,7 +329,12 @@ public String getBlobUrl() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getContainerName} + * + *
+     * String containerName = client.getContainerName();
+     * System.out.println("The name of the container is " + containerName);
+     * 
+ * * * @return The name of the container. */ @@ -342,7 +347,12 @@ public final String getContainerName() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getContainerAsyncClient} + * + *
+     * BlobContainerAsyncClient containerClient = client.getContainerAsyncClient();
+     * System.out.println("The name of the container is " + containerClient.getBlobContainerName());
+     * 
+ * * * @return {@link BlobContainerAsyncClient} */ @@ -366,7 +376,12 @@ final BlobContainerClientBuilder getContainerClientBuilder() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getBlobName} + * + *
+     * String blobName = client.getBlobName();
+     * System.out.println("The name of the blob is " + blobName);
+     * 
+ * * * @return The decoded name of the blob. */ @@ -442,7 +457,11 @@ public String getVersionId() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.exists} + * + *
+     * client.exists().subscribe(response -> System.out.printf("Exists? %b%n", response));
+     * 
+ * * * @return true if the blob exists, false if it doesn't */ @@ -460,7 +479,11 @@ public Mono exists() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.existsWithResponse} + * + *
+     * client.existsWithResponse().subscribe(response -> System.out.printf("Exists? %b%n", response.getValue()));
+     * 
+ * * * @return true if the blob exists, false if it doesn't */ @@ -502,7 +525,12 @@ Mono> existsWithResponse(Context context) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.beginCopy#String-Duration} + * + *
+     * client.beginCopy(url, Duration.ofSeconds(3))
+     *     .subscribe(response -> System.out.printf("Copy identifier: %s%n", response.getValue().getCopyId()));
+     * 
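     The subscription above prints each polling response as it arrives. If only the final result is wanted, one sketch (blocking, for illustration only) is to take the last response emitted by the PollerFlux:

         AsyncPollResponse<BlobCopyInfo, Void> finalResponse = client.beginCopy(url, Duration.ofSeconds(3))
             .last()
             .block();
         System.out.printf("Final copy status: %s%n", finalResponse.getValue().getCopyStatus());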
+ * * *

For more information, see the * Azure Docs

@@ -529,7 +557,21 @@ public PollerFlux beginCopy(String sourceUrl, Duration pollI *

Starting a copy operation

* Starting a copy operation and polling on the responses. * - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.beginCopy#String-Map-AccessTier-RehydratePriority-RequestConditions-BlobRequestConditions-Duration} + * + *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(7));
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     *
+     * client.beginCopy(url, metadata, AccessTier.HOT, RehydratePriority.STANDARD,
+     *     modifiedRequestConditions, blobRequestConditions, Duration.ofSeconds(2))
+     *     .subscribe(response -> {
+     *         BlobCopyInfo info = response.getValue();
+     *         System.out.printf("CopyId: %s. Status: %s%n", info.getCopyId(), info.getCopyStatus());
+     *     });
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -570,11 +612,52 @@ public PollerFlux beginCopy(String sourceUrl, MapStarting a copy operation

* Starting a copy operation and polling on the responses. * - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.beginCopy#BlobBeginCopyOptions} + * + *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * Map<String, String> tags = Collections.singletonMap("tag", "value");
+     * BlobBeginCopySourceRequestConditions modifiedRequestConditions = new BlobBeginCopySourceRequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(7));
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     *
+     * client.beginCopy(new BlobBeginCopyOptions(url).setMetadata(metadata).setTags(tags).setTier(AccessTier.HOT)
+     *     .setRehydratePriority(RehydratePriority.STANDARD).setSourceRequestConditions(modifiedRequestConditions)
+     *     .setDestinationRequestConditions(blobRequestConditions).setPollInterval(Duration.ofSeconds(2)))
+     *     .subscribe(response -> {
+     *         BlobCopyInfo info = response.getValue();
+     *         System.out.printf("CopyId: %s. Status: %s%n", info.getCopyId(), info.getCopyStatus());
+     *     });
+     * 
+ * * *

Cancelling a copy operation

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.beginCopyFromUrlCancel#BlobBeginCopyOptions} + * + *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * Map<String, String> tags = Collections.singletonMap("tag", "value");
+     * BlobBeginCopySourceRequestConditions modifiedRequestConditions = new BlobBeginCopySourceRequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(7));
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     *
+     * PollerFlux<BlobCopyInfo, Void> poller = client.beginCopy(new BlobBeginCopyOptions(url)
+     *     .setMetadata(metadata).setTags(tags).setTier(AccessTier.HOT)
+     *     .setRehydratePriority(RehydratePriority.STANDARD).setSourceRequestConditions(modifiedRequestConditions)
+     *     .setDestinationRequestConditions(blobRequestConditions).setPollInterval(Duration.ofSeconds(2)));
+     *
+     * poller.take(Duration.ofMinutes(30))
+     *         .last()
+     *         .flatMap(asyncPollResponse -> {
+     *             if (!asyncPollResponse.getStatus().isComplete()) {
+     *                 return asyncPollResponse
+     *                         .cancelOperation()
+     *                         .then(Mono.error(new RuntimeException("Blob copy taking long time, "
+     *                                 + "operation is cancelled!")));
+     *             }
+     *             return Mono.just(asyncPollResponse);
+     *         }).block();
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -733,7 +816,11 @@ private Mono> onPoll(PollResponse pollR * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.abortCopyFromUrl#String} + * + *
+     * client.abortCopyFromUrl(copyId).doOnSuccess(response -> System.out.println("Aborted copy from URL"))
+     *     .subscribe();
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -758,7 +845,12 @@ public Mono abortCopyFromUrl(String copyId) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.abortCopyFromUrlWithResponse#String-String} + * + *
+     * client.abortCopyFromUrlWithResponse(copyId, leaseId)
+     *     .subscribe(response -> System.out.printf("Aborted copy completed with status %d%n", response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -793,7 +885,11 @@ Mono> abortCopyFromUrlWithResponse(String copyId, String leaseId, * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.copyFromUrl#String} + * + *
+     * client.copyFromUrl(url).subscribe(response -> System.out.printf("Copy identifier: %s%n", response));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -818,7 +914,17 @@ public Mono copyFromUrl(String copySource) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.copyFromUrlWithResponse#String-Map-AccessTier-RequestConditions-BlobRequestConditions} + * + *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(7));
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     *
+     * client.copyFromUrlWithResponse(url, metadata, AccessTier.HOT, modifiedRequestConditions, blobRequestConditions)
+     *     .subscribe(response -> System.out.printf("Copy identifier: %s%n", response.getValue()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -851,7 +957,20 @@ public Mono> copyFromUrlWithResponse(String copySource, MapCode Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.copyFromUrlWithResponse#BlobCopyFromUrlOptions} + * + *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * Map<String, String> tags = Collections.singletonMap("tag", "value");
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(7));
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     *
+     * client.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(url).setMetadata(metadata).setTags(tags)
+     *     .setTier(AccessTier.HOT).setSourceRequestConditions(modifiedRequestConditions)
+     *     .setDestinationRequestConditions(blobRequestConditions))
+     *     .subscribe(response -> System.out.printf("Copy identifier: %s%n", response));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -904,7 +1023,18 @@ Mono> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, C * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.download} + * + *
+     * ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
+     * client.download().subscribe(piece -> {
+     *     try {
+     *         downloadData.write(piece.array());
+     *     } catch (IOException ex) {
+     *         throw new UncheckedIOException(ex);
+     *     }
+     * });
+     * 
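     Writing each chunk into a ByteArrayOutputStream works for any blob size, but for blobs known to fit in memory the stream can also be collected into a single byte array with azure-core's FluxUtil; a brief sketch:

         client.download()
             .as(FluxUtil::collectBytesInByteBufferStream) // Mono<byte[]> of the whole blob
             .subscribe(bytes -> System.out.printf("Downloaded %d bytes%n", bytes.length));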
+ * * *

For more information, see the * Azure Docs

@@ -924,7 +1054,18 @@ public Flux download() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.downloadStream} + * + *
+     * ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
+     * client.downloadStream().subscribe(piece -> {
+     *     try {
+     *         downloadData.write(piece.array());
+     *     } catch (IOException ex) {
+     *         throw new UncheckedIOException(ex);
+     *     }
+     * });
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -947,7 +1088,13 @@ public Flux downloadStream() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobAsyncClient.downloadContent} + * + *
+     * client.downloadContent().subscribe(data -> {
+     *     System.out.printf("Downloaded %s", data.toString());
+     * });
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -973,7 +1120,23 @@ public Mono downloadContent() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.downloadWithResponse#BlobRange-DownloadRetryOptions-BlobRequestConditions-boolean} + * + *
+     * BlobRange range = new BlobRange(1024, (long) 2048);
+     * DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
+     *
+     * client.downloadWithResponse(range, options, null, false).subscribe(response -> {
+     *     ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
+     *     response.getValue().subscribe(piece -> {
+     *         try {
+     *             downloadData.write(piece.array());
+     *         } catch (IOException ex) {
+     *             throw new UncheckedIOException(ex);
+     *         }
+     *     });
+     * });
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -999,7 +1162,23 @@ public Mono downloadWithResponse(BlobRange range, Dow * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.downloadStreamWithResponse#BlobRange-DownloadRetryOptions-BlobRequestConditions-boolean} + * + *
+     * BlobRange range = new BlobRange(1024, (long) 2048);
+     * DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
+     *
+     * client.downloadStreamWithResponse(range, options, null, false).subscribe(response -> {
+     *     ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
+     *     response.getValue().subscribe(piece -> {
+     *         try {
+     *             downloadData.write(piece.array());
+     *         } catch (IOException ex) {
+     *             throw new UncheckedIOException(ex);
+     *         }
+     *     });
+     * });
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1028,7 +1207,16 @@ public Mono downloadStreamWithResponse(BlobRange rang * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.downloadContentWithResponse#DownloadRetryOptions-BlobRequestConditions} + * + *
+     * DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
+     *
+     * client.downloadContentWithResponse(options, null).subscribe(response -> {
+     *     BinaryData content = response.getValue();
+     *     System.out.println(content.toString());
+     * });
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1146,7 +1334,11 @@ private Mono downloadRange(BlobRange range, BlobRequestCondition * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.downloadToFile#String} + * + *
+     * client.downloadToFile(file).subscribe(response -> System.out.println("Completed download to file"));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1167,7 +1359,12 @@ public Mono downloadToFile(String filePath) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.downloadToFile#String-boolean} + * + *
+     * boolean overwrite = false; // Default value
+     * client.downloadToFile(file, overwrite).subscribe(response -> System.out.println("Completed download to file"));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1203,7 +1400,15 @@ public Mono downloadToFile(String filePath, boolean overwrite) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.downloadToFileWithResponse#String-BlobRange-ParallelTransferOptions-DownloadRetryOptions-BlobRequestConditions-boolean} + * + *
+     * BlobRange range = new BlobRange(1024, 2048L);
+     * DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
+     *
+     * client.downloadToFileWithResponse(file, range, null, options, null, false)
+     *     .subscribe(response -> System.out.println("Completed download to file"));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1236,7 +1441,17 @@ public Mono> downloadToFileWithResponse(String filePath * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.downloadToFileWithResponse#String-BlobRange-ParallelTransferOptions-DownloadRetryOptions-BlobRequestConditions-boolean-Set} + * + *
+     * BlobRange blobRange = new BlobRange(1024, 2048L);
+     * DownloadRetryOptions downloadRetryOptions = new DownloadRetryOptions().setMaxRetryRequests(5);
+     * Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE_NEW,
+     *     StandardOpenOption.WRITE, StandardOpenOption.READ)); // Default options
+     *
+     * client.downloadToFileWithResponse(file, blobRange, null, downloadRetryOptions, null, false, openOptions)
+     *     .subscribe(response -> System.out.println("Completed download to file"));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1279,7 +1494,16 @@ public Mono> downloadToFileWithResponse(String filePath * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.downloadToFileWithResponse#BlobDownloadToFileOptions} + * + *
+     * client.downloadToFileWithResponse(new BlobDownloadToFileOptions(file)
+     *     .setRange(new BlobRange(1024, 2048L))
+     *     .setDownloadRetryOptions(new DownloadRetryOptions().setMaxRetryRequests(5))
+     *     .setOpenOptions(new HashSet<>(Arrays.asList(StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE,
+     *         StandardOpenOption.READ))))
+     *     .subscribe(response -> System.out.println("Completed download to file"));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1427,7 +1651,11 @@ private void downloadToFileCleanup(AsynchronousFileChannel channel, String fileP * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.delete} + * + *
+     * client.delete().doOnSuccess(response -> System.out.println("Completed delete")).subscribe();
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1450,7 +1678,12 @@ public Mono delete() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.deleteWithResponse#DeleteSnapshotsOptionType-BlobRequestConditions} + * + *
+     * client.deleteWithResponse(DeleteSnapshotsOptionType.INCLUDE, null)
+     *     .subscribe(response -> System.out.printf("Delete completed with status %d%n", response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1488,7 +1721,12 @@ Mono> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnap * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getProperties} + * + *
+     * client.getProperties().subscribe(response ->
+     *     System.out.printf("Type: %s, Size: %d%n", response.getBlobType(), response.getBlobSize()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1509,7 +1747,15 @@ public Mono getProperties() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getPropertiesWithResponse#BlobRequestConditions} + * + *
+     * BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     *
+     * client.getPropertiesWithResponse(requestConditions).subscribe(
+     *     response -> System.out.printf("Type: %s, Size: %d%n", response.getValue().getBlobType(),
+     *         response.getValue().getBlobSize()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1566,7 +1812,13 @@ Mono> getPropertiesWithResponse(BlobRequestConditions r * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setHttpHeaders#BlobHttpHeaders} + * + *
+     * client.setHttpHeaders(new BlobHttpHeaders()
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary")).subscribe();
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1589,7 +1841,18 @@ public Mono setHttpHeaders(BlobHttpHeaders headers) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setHttpHeadersWithResponse#BlobHttpHeaders-BlobRequestConditions} + * + *
+     * BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     *
+     * client.setHttpHeadersWithResponse(new BlobHttpHeaders()
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary"), requestConditions).subscribe(
+     *         response ->
+     *             System.out.printf("Set HTTP headers completed with status %d%n",
+     *                 response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1625,7 +1888,11 @@ Mono> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobReq * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setMetadata#Map} + * + *
+     * client.setMetadata(Collections.singletonMap("metadata", "value")).subscribe();
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1649,7 +1916,14 @@ public Mono setMetadata(Map metadata) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setMetadataWithResponse#Map-BlobRequestConditions} + * + *
+     * BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     *
+     * client.setMetadataWithResponse(Collections.singletonMap("metadata", "value"), requestConditions)
+     *     .subscribe(response -> System.out.printf("Set metadata completed with status %d%n", response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1687,7 +1961,12 @@ Mono> setMetadataWithResponse(Map metadata, BlobR * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getTags} + * + *
+     * client.getTags().subscribe(response ->
+     *     System.out.printf("Num tags: %d%n", response.size()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1704,7 +1983,12 @@ public Mono> getTags() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getTagsWithResponse#BlobGetTagsOptions} + * + *
+     * client.getTagsWithResponse(new BlobGetTagsOptions()).subscribe(response ->
+     *     System.out.printf("Status code: %d. Num tags: %d%n", response.getStatusCode(), response.getValue().size()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1742,7 +2026,11 @@ Mono>> getTagsWithResponse(BlobGetTagsOptions optio * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setTags#Map} + * + *
+     * client.setTags(Collections.singletonMap("tag", "value")).subscribe();
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1761,7 +2049,12 @@ public Mono setTags(Map tags) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setTagsWithResponse#BlobSetTagsOptions} + * + *
+     * client.setTagsWithResponse(new BlobSetTagsOptions(Collections.singletonMap("tag", "value")))
+     *     .subscribe(response -> System.out.printf("Set tags completed with status %d%n", response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1800,7 +2093,13 @@ Mono> setTagsWithResponse(BlobSetTagsOptions options, Context con * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.createSnapshot} + * + *
+     * client.createSnapshot()
+     *     .subscribe(response -> System.out.printf("Identifier for the snapshot is %s%n",
+     *         response.getSnapshotId()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1822,7 +2121,15 @@ public Mono createSnapshot() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.createSnapshotWithResponse#Map-BlobRequestConditions} + * + *
+     * Map<String, String> snapshotMetadata = Collections.singletonMap("metadata", "value");
+     * BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     *
+     * client.createSnapshotWithResponse(snapshotMetadata, requestConditions)
+     *     .subscribe(response -> System.out.printf("Identifier for the snapshot is %s%n",
+     *         response.getValue().getSnapshotId()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1863,7 +2170,11 @@ Mono> createSnapshotWithResponse(MapCode Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setAccessTier#AccessTier} + * + *
+     * client.setAccessTier(AccessTier.HOT).subscribe();
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1889,7 +2200,13 @@ public Mono setAccessTier(AccessTier tier) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setAccessTierWithResponse#AccessTier-RehydratePriority-String} + * + *
+     * client.setAccessTierWithResponse(AccessTier.HOT, RehydratePriority.STANDARD, leaseId)
+     *     .subscribe(response -> System.out.printf("Set tier completed with status code %d%n",
+     *         response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1918,7 +2235,16 @@ public Mono> setAccessTierWithResponse(AccessTier tier, Rehydrate * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setAccessTierWithResponse#BlobSetAccessTierOptions} + * + *
+     * client.setAccessTierWithResponse(new BlobSetAccessTierOptions(AccessTier.HOT)
+     *     .setPriority(RehydratePriority.STANDARD)
+     *     .setLeaseId(leaseId)
+     *     .setTagsConditions(tags))
+     *     .subscribe(response -> System.out.printf("Set tier completed with status code %d%n",
+     *         response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1950,7 +2276,11 @@ Mono> setTierWithResponse(BlobSetAccessTierOptions options, Conte * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.undelete} + * + *
+     * client.undelete().doOnSuccess(response -> System.out.println("Completed undelete")).subscribe();
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1971,7 +2301,12 @@ public Mono undelete() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.undeleteWithResponse} + * + *
+     * client.undeleteWithResponse()
+     *     .subscribe(response -> System.out.printf("Undelete completed with status %d%n", response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1997,7 +2332,12 @@ Mono> undeleteWithResponse(Context context) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getAccountInfo} + * + *
+     * client.getAccountInfo().subscribe(response -> System.out.printf("Account Kind: %s, SKU: %s%n",
+     *     response.getAccountKind(), response.getSkuName()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -2018,7 +2358,12 @@ public Mono getAccountInfo() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.getAccountInfoWithResponse} + * + *
+     * client.getAccountInfoWithResponse().subscribe(response -> System.out.printf("Account Kind: %s, SKU: %s%n",
+     *     response.getValue().getAccountKind(), response.getValue().getSkuName()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -2048,7 +2393,17 @@ Mono> getAccountInfoWithResponse(Context context) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.generateUserDelegationSas#BlobServiceSasSignatureValues-UserDelegationKey} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * BlobSasPermission permission = new BlobSasPermission().setReadPermission(true);
+     *
+     * BlobServiceSasSignatureValues values = new BlobServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateUserDelegationSas(values, userDelegationKey);
+     * 
+ * * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. @@ -2069,7 +2424,17 @@ public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServic * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.generateUserDelegationSas#BlobServiceSasSignatureValues-UserDelegationKey-String-Context} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * BlobSasPermission permission = new BlobSasPermission().setReadPermission(true);
+     *
+     * BlobServiceSasSignatureValues values = new BlobServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateUserDelegationSas(values, userDelegationKey, accountName, new Context("key", "value"));
+     * 
+ * * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. @@ -2094,7 +2459,17 @@ public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServic * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.generateSas#BlobServiceSasSignatureValues} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * BlobSasPermission permission = new BlobSasPermission().setReadPermission(true);
+     *
+     * BlobServiceSasSignatureValues values = new BlobServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateSas(values); // Client must be authenticated via StorageSharedKeyCredential
+     * 
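     A hedged sketch of using the returned token: append it to the blob URL to build a client that needs no further credential (variable names are illustrative):

         String sas = client.generateSas(values);
         BlobAsyncClient sasClient = new BlobClientBuilder()
             .endpoint(client.getBlobUrl())
             .sasToken(sas)
             .buildAsyncClient();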
+ * * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * @@ -2111,7 +2486,18 @@ public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureV * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.generateSas#BlobServiceSasSignatureValues-Context} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * BlobSasPermission permission = new BlobSasPermission().setReadPermission(true);
+     *
+     * BlobServiceSasSignatureValues values = new BlobServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * client.generateSas(values, new Context("key", "value"));
+     * 
+ * * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. @@ -2132,7 +2518,19 @@ public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureV * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.query#String} + * + *
+     * ByteArrayOutputStream queryData = new ByteArrayOutputStream();
+     * String expression = "SELECT * from BlobStorage";
+     * client.query(expression).subscribe(piece -> {
+     *     try {
+     *         queryData.write(piece.array());
+     *     } catch (IOException ex) {
+     *         throw new UncheckedIOException(ex);
+     *     }
+     * });
+     * 
+ * * * @param expression The query expression. * @return A reactive response containing the queried data. @@ -2151,7 +2549,41 @@ public Flux query(String expression) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.queryWithResponse#BlobQueryOptions} + * + *
+     * String expression = "SELECT * from BlobStorage";
+     * BlobQueryJsonSerialization input = new BlobQueryJsonSerialization()
+     *     .setRecordSeparator('\n');
+     * BlobQueryDelimitedSerialization output = new BlobQueryDelimitedSerialization()
+     *     .setEscapeChar('\0')
+     *     .setColumnSeparator(',')
+     *     .setRecordSeparator('\n')
+     *     .setFieldQuote('\'')
+     *     .setHeadersPresent(true);
+     * BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     * Consumer<BlobQueryError> errorConsumer = System.out::println;
+     * Consumer<BlobQueryProgress> progressConsumer = progress -> System.out.println("total blob bytes read: "
+     *     + progress.getBytesScanned());
+     * BlobQueryOptions queryOptions = new BlobQueryOptions(expression)
+     *     .setInputSerialization(input)
+     *     .setOutputSerialization(output)
+     *     .setRequestConditions(requestConditions)
+     *     .setErrorConsumer(errorConsumer)
+     *     .setProgressConsumer(progressConsumer);
+     *
+     * client.queryWithResponse(queryOptions)
+     *     .subscribe(response -> {
+     *         ByteArrayOutputStream queryData = new ByteArrayOutputStream();
+     *         response.getValue().subscribe(piece -> {
+     *             try {
+     *                 queryData.write(piece.array());
+     *             } catch (IOException ex) {
+     *                 throw new UncheckedIOException(ex);
+     *             }
+     *         });
+     *     });
+     * 
+ * * * @param queryOptions {@link BlobQueryOptions The query options}. * @return A reactive response containing the queried data. @@ -2201,7 +2633,15 @@ qr, getCustomerProvidedKey(), context) * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setImmutabilityPolicy#BlobImmutabilityPolicy} + * + *
+     * BlobImmutabilityPolicy policy = new BlobImmutabilityPolicy()
+     *     .setPolicyMode(BlobImmutabilityPolicyMode.LOCKED)
+     *     .setExpiryTime(OffsetDateTime.now().plusDays(1));
+     * client.setImmutabilityPolicy(policy).subscribe(response -> System.out.println("Completed. Set immutability "
+     *     + "policy to " + response.getPolicyMode()));
+     * 
+ * * * @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}. * @return A reactive response containing the immutability policy. @@ -2222,7 +2662,17 @@ public Mono setImmutabilityPolicy(BlobImmutabilityPolicy * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setImmutabilityPolicyWithResponse#BlobImmutabilityPolicy-BlobRequestConditions} + * + *
+     * BlobImmutabilityPolicy immutabilityPolicy = new BlobImmutabilityPolicy()
+     *     .setPolicyMode(BlobImmutabilityPolicyMode.LOCKED)
+     *     .setExpiryTime(OffsetDateTime.now().plusDays(1));
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(1));
+     * client.setImmutabilityPolicyWithResponse(immutabilityPolicy, requestConditions).subscribe(response ->
+     *     System.out.println("Completed. Set immutability policy to " + response.getValue().getPolicyMode()));
+     * 
+ * * * @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}. * @param requestConditions {@link BlobRequestConditions} @@ -2279,7 +2729,12 @@ Mono> setImmutabilityPolicyWithResponse( * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.deleteImmutabilityPolicy} + * + *
+     * client.deleteImmutabilityPolicy().subscribe(response -> System.out.println("Completed immutability policy"
+     *     + " deletion."));
+     * 
+ * * * @return A reactive response signalling completion. */ @@ -2299,7 +2754,12 @@ public Mono deleteImmutabilityPolicy() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.deleteImmutabilityPolicyWithResponse} + * + *
+     * client.deleteImmutabilityPolicyWithResponse().subscribe(response ->
+     *     System.out.println("Delete immutability policy completed with status: " + response.getStatusCode()));
+     * 
+ * * * @return A reactive response signalling completion. */ @@ -2326,7 +2786,12 @@ Mono> deleteImmutabilityPolicyWithResponse(Context context) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setLegalHold#boolean} + * + *
+     * client.setLegalHold(true).subscribe(response -> System.out.println("Legal hold status: "
+     *     + response.hasLegalHold()));
+     * 
+ * * * @param legalHold Whether or not you want a legal hold on the blob. * @return A reactive response containing the legal hold result. @@ -2347,7 +2812,12 @@ public Mono setLegalHold(boolean legalHold) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobAsyncClientBase.setLegalHoldWithResponse#boolean} + * + *
+     * client.setLegalHoldWithResponse(true).subscribe(response ->
+     *     System.out.println("Legal hold status: " + response.getValue().hasLegalHold()));
+     * 
+ * * * @param legalHold Whether or not you want a legal hold on the blob. * @return A reactive response containing the legal hold result. diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlobClientBase.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlobClientBase.java index 35f5263460b97..706d2b9a7c292 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlobClientBase.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlobClientBase.java @@ -169,7 +169,12 @@ public String getAccountName() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName} + * + *
+     * String containerName = client.getContainerName();
+     * System.out.println("The name of the container is " + containerName);
+     * 
+ * * * @return The name of the container. */ @@ -182,7 +187,12 @@ public final String getContainerName() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerClient} + * + *
+     * BlobContainerClient containerClient = client.getContainerClient();
+     * System.out.println("The name of the container is " + containerClient.getBlobContainerName());
+     * 
+ * * * @return {@link BlobContainerClient} */ @@ -195,7 +205,12 @@ public BlobContainerClient getContainerClient() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName} + * + *
+     * String blobName = client.getBlobName();
+     * System.out.println("The name of the blob is " + blobName);
+     * 
+ * * * @return The decoded name of the blob. */ @@ -348,7 +363,11 @@ public BlobInputStream openInputStream(BlobInputStreamOptions options) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists} + * + *
+     * System.out.printf("Exists? %b%n", client.exists());
+     * 
+ * * * @return true if the blob exists, false if it doesn't */ @@ -362,7 +381,11 @@ public Boolean exists() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse#Duration-Context} + * + *
+     * System.out.printf("Exists? %b%n", client.existsWithResponse(timeout, new Context(key2, value2)).getValue());
+     * 
+ * * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. @@ -385,7 +408,13 @@ public Response existsWithResponse(Duration timeout, Context context) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy#String-Duration} + * + *
+     * final SyncPoller<BlobCopyInfo, Void> poller = client.beginCopy(url, Duration.ofSeconds(2));
+     * PollResponse<BlobCopyInfo> pollResponse = poller.poll();
+     * System.out.printf("Copy identifier: %s%n", pollResponse.getValue().getCopyId());
+     * 
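     When the copy should be awaited rather than polled once, SyncPoller can block until the operation reaches a terminal state; a short sketch:

         SyncPoller<BlobCopyInfo, Void> copyPoller = client.beginCopy(url, Duration.ofSeconds(2));
         PollResponse<BlobCopyInfo> finalResponse = copyPoller.waitForCompletion();
         System.out.printf("Final copy status: %s%n", finalResponse.getValue().getCopyStatus());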
+ * * *

For more information, see the * Azure Docs

@@ -415,7 +444,19 @@ public SyncPoller beginCopy(String sourceUrl, Duration pollI * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy#String-Map-AccessTier-RehydratePriority-RequestConditions-BlobRequestConditions-Duration} + * + *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(7));
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     * SyncPoller<BlobCopyInfo, Void> poller = client.beginCopy(url, metadata, AccessTier.HOT,
+     *     RehydratePriority.STANDARD, modifiedRequestConditions, blobRequestConditions, Duration.ofSeconds(2));
+     *
+     * PollResponse<BlobCopyInfo> response = poller.waitUntil(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
+     * System.out.printf("Copy identifier: %s%n", response.getValue().getCopyId());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -454,7 +495,22 @@ public SyncPoller beginCopy(String sourceUrl, MapCode Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy#BlobBeginCopyOptions} + * + *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * Map<String, String> tags = Collections.singletonMap("tag", "value");
+     * BlobBeginCopySourceRequestConditions modifiedRequestConditions = new BlobBeginCopySourceRequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(7));
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     * SyncPoller<BlobCopyInfo, Void> poller = client.beginCopy(new BlobBeginCopyOptions(url).setMetadata(metadata)
+     *     .setTags(tags).setTier(AccessTier.HOT).setRehydratePriority(RehydratePriority.STANDARD)
+     *     .setSourceRequestConditions(modifiedRequestConditions)
+     *     .setDestinationRequestConditions(blobRequestConditions).setPollInterval(Duration.ofSeconds(2)));
+     *
+     * PollResponse<BlobCopyInfo> response = poller.waitUntil(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
+     * System.out.printf("Copy identifier: %s%n", response.getValue().getCopyId());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -474,7 +530,12 @@ public SyncPoller beginCopy(BlobBeginCopyOptions options) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl#String} + * + *
+     * client.abortCopyFromUrl(copyId);
+     * System.out.println("Aborted copy completed.");
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -491,7 +552,13 @@ public void abortCopyFromUrl(String copyId) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse#String-String-Duration-Context} + * + *
+     * System.out.printf("Aborted copy completed with status %d%n",
+     *     client.abortCopyFromUrlWithResponse(copyId, leaseId, timeout,
+     *         new Context(key2, value2)).getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -516,7 +583,11 @@ public Response abortCopyFromUrlWithResponse(String copyId, String leaseId * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl#String} + * + *
+     * System.out.printf("Copy identifier: %s%n", client.copyFromUrl(url));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -538,7 +609,19 @@ public String copyFromUrl(String copySource) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse#String-Map-AccessTier-RequestConditions-BlobRequestConditions-Duration-Context} + * + *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(7));
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     *
+     * System.out.printf("Copy identifier: %s%n",
+     *     client.copyFromUrlWithResponse(url, metadata, AccessTier.HOT, modifiedRequestConditions,
+     *         blobRequestConditions, timeout,
+     *         new Context(key1, value1)).getValue());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -574,7 +657,21 @@ public Response copyFromUrlWithResponse(String copySource, MapCode Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse#BlobCopyFromUrlOptions-Duration-Context} + * + *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * Map<String, String> tags = Collections.singletonMap("tag", "value");
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(7));
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     *
+     * System.out.printf("Copy identifier: %s%n",
+     *     client.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(url).setMetadata(metadata).setTags(tags)
+     *         .setTier(AccessTier.HOT).setSourceRequestConditions(modifiedRequestConditions)
+     *         .setDestinationRequestConditions(blobRequestConditions), timeout,
+     *         new Context(key1, value1)).getValue());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -600,7 +697,12 @@ public Response copyFromUrlWithResponse(BlobCopyFromUrlOptions options, * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download#OutputStream} + * + *
+     * client.download(new ByteArrayOutputStream());
+     * System.out.println("Download completed.");
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -622,7 +724,12 @@ public void download(OutputStream stream) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStream#OutputStream} + * + *
+     * client.downloadStream(new ByteArrayOutputStream());
+     * System.out.println("Download completed.");
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -642,7 +749,12 @@ public void downloadStream(OutputStream stream) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.BlobClient.downloadContent} + * + *
+     * BinaryData data = client.downloadContent();
+     * System.out.printf("Downloaded %s", data.toString());
+     * 
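     BinaryData keeps the whole payload in memory, so downloadContent is intended for small blobs. Besides toString(), the content can be read as raw bytes, as a sketch:

         BinaryData data = client.downloadContent();
         byte[] bytes = data.toBytes(); // raw content
         System.out.printf("Downloaded %d bytes%n", bytes.length);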
+ * * *

For more information, see the * Azure Docs

@@ -664,7 +776,16 @@ public BinaryData downloadContent() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse#OutputStream-BlobRange-DownloadRetryOptions-BlobRequestConditions-boolean-Duration-Context} + * + *
+     * BlobRange range = new BlobRange(1024, 2048L);
+     * DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
+     *
+     * System.out.printf("Download completed with status %d%n",
+     *     client.downloadWithResponse(new ByteArrayOutputStream(), range, options, null, false,
+     *         timeout, new Context(key2, value2)).getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -698,7 +819,16 @@ public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadStreamWithResponse#OutputStream-BlobRange-DownloadRetryOptions-BlobRequestConditions-boolean-Duration-Context} + * + *
+     * BlobRange range = new BlobRange(1024, 2048L);
+     * DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
+     *
+     * System.out.printf("Download completed with status %d%n",
+     *     client.downloadStreamWithResponse(new ByteArrayOutputStream(), range, options, null, false,
+     *         timeout, new Context(key2, value2)).getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -739,7 +869,17 @@ public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, Blob * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadContentWithResponse#DownloadRetryOptions-BlobRequestConditions-Duration-Context} + * + *
+     * DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
+     *
+     * BlobDownloadContentResponse contentResponse = client.downloadContentWithResponse(options, null,
+     *     timeout, new Context(key2, value2));
+     * BinaryData content = contentResponse.getValue();
+     * System.out.printf("Download completed with status %d and content %s%n",
+     *     contentResponse.getStatusCode(), content.toString());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -780,7 +920,12 @@ public BlobDownloadContentResponse downloadContentWithResponse( * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile#String} + * + *
+     * client.downloadToFile(file);
+     * System.out.println("Completed download to file");
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -802,7 +947,13 @@ public BlobProperties downloadToFile(String filePath) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile#String-boolean} + * + *
+     * boolean overwrite = false; // Default value
+     * client.downloadToFile(file, overwrite);
+     * System.out.println("Completed download to file");
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -835,7 +986,16 @@ public BlobProperties downloadToFile(String filePath, boolean overwrite) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse#String-BlobRange-ParallelTransferOptions-DownloadRetryOptions-BlobRequestConditions-boolean-Duration-Context} + * + *
+     * BlobRange range = new BlobRange(1024, 2048L);
+     * DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
+     *
+     * client.downloadToFileWithResponse(file, range, new ParallelTransferOptions().setBlockSizeLong(4L * Constants.MB),
+     *     options, null, false, timeout, new Context(key2, value2));
+     * System.out.println("Completed download to file");
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -869,7 +1029,18 @@ public Response downloadToFileWithResponse(String filePath, Blob * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse#String-BlobRange-ParallelTransferOptions-DownloadRetryOptions-BlobRequestConditions-boolean-Set-Duration-Context} + * + *
+     * BlobRange blobRange = new BlobRange(1024, 2048L);
+     * DownloadRetryOptions downloadRetryOptions = new DownloadRetryOptions().setMaxRetryRequests(5);
+     * Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE_NEW,
+     *     StandardOpenOption.WRITE, StandardOpenOption.READ)); // Default options
+     *
+     * client.downloadToFileWithResponse(file, blobRange, new ParallelTransferOptions().setBlockSizeLong(4L * Constants.MB),
+     *     downloadRetryOptions, null, false, openOptions, timeout, new Context(key2, value2));
+     * System.out.println("Completed download to file");
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -909,7 +1080,16 @@ public Response downloadToFileWithResponse(String filePath, Blob * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse#BlobDownloadToFileOptions-Duration-Context} + * + *
+     * client.downloadToFileWithResponse(new BlobDownloadToFileOptions(file)
+     *     .setRange(new BlobRange(1024, 2048L))
+     *     .setDownloadRetryOptions(new DownloadRetryOptions().setMaxRetryRequests(5))
+     *     .setOpenOptions(new HashSet<>(Arrays.asList(StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE,
+     *         StandardOpenOption.READ))), timeout, new Context(key2, value2));
+     * System.out.println("Completed download to file");
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -934,7 +1114,12 @@ public Response downloadToFileWithResponse(BlobDownloadToFileOpt * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete} + * + *
+     * client.delete();
+     * System.out.println("Delete completed.");
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -951,7 +1136,13 @@ public void delete() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse#DeleteSnapshotsOptionType-BlobRequestConditions-Duration-Context} + * + *
+     * System.out.printf("Delete completed with status %d%n",
+     *     client.deleteWithResponse(DeleteSnapshotsOptionType.INCLUDE, null, timeout,
+     *         new Context(key1, value1)).getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -978,7 +1169,12 @@ public Response deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSna * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties} + * + *
+     * BlobProperties properties = client.getProperties();
+     * System.out.printf("Type: %s, Size: %d%n", properties.getBlobType(), properties.getBlobSize());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -995,7 +1191,15 @@ public BlobProperties getProperties() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse#BlobRequestConditions-Duration-Context} + * + *
+     * BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     *
+     * BlobProperties properties = client.getPropertiesWithResponse(requestConditions, timeout,
+     *     new Context(key2, value2)).getValue();
+     * System.out.printf("Type: %s, Size: %d%n", properties.getBlobType(), properties.getBlobSize());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1019,7 +1223,14 @@ public Response getPropertiesWithResponse(BlobRequestConditions * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders#BlobHttpHeaders} + * + *
+     * client.setHttpHeaders(new BlobHttpHeaders()
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary"));
+     * System.out.println("Set HTTP headers completed");
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1037,7 +1248,17 @@ public void setHttpHeaders(BlobHttpHeaders headers) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse#BlobHttpHeaders-BlobRequestConditions-Duration-Context} + * + *
+     * BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     *
+     * System.out.printf("Set HTTP headers completed with status %d%n",
+     *     client.setHttpHeadersWithResponse(new BlobHttpHeaders()
+     *         .setContentLanguage("en-US")
+     *         .setContentType("binary"), requestConditions, timeout, new Context(key1, value1))
+     *         .getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1063,7 +1284,12 @@ public Response setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRe * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata#Map} + * + *
+     * client.setMetadata(Collections.singletonMap("metadata", "value"));
+     * System.out.println("Set metadata completed");
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1082,7 +1308,15 @@ public void setMetadata(Map metadata) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse#Map-BlobRequestConditions-Duration-Context} + * + *
+     * BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     *
+     * System.out.printf("Set metadata completed with status %d%n",
+     *     client.setMetadataWithResponse(Collections.singletonMap("metadata", "value"), requestConditions, timeout,
+     *         new Context(key1, value1)).getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1107,7 +1341,12 @@ public Response setMetadataWithResponse(Map metadata, Blob * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags} + * + *
+     * Map<String, String> tags = client.getTags();
+     * System.out.printf("Number of tags: %d%n", tags.size());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1124,7 +1363,13 @@ public Map getTags() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse#BlobGetTagsOptions-Duration-Context} + * + *
+     * Map<String, String> tags = client.getTagsWithResponse(new BlobGetTagsOptions(), timeout,
+     *     new Context(key1, value1)).getValue();
+     * System.out.printf("Number of tags: %d%n", tags.size());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1148,7 +1393,12 @@ public Response> getTagsWithResponse(BlobGetTagsOptions opti * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags#Map} + * + *
+     * client.setTags(Collections.singletonMap("tag", "value"));
+     * System.out.println("Set tag completed");
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1166,7 +1416,14 @@ public void setTags(Map tags) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse#BlobSetTagsOptions-Duration-Context} + * + *
+     * System.out.printf("Set metadata completed with status %d%n",
+     *     client.setTagsWithResponse(new BlobSetTagsOptions(Collections.singletonMap("tag", "value")), timeout,
+     *         new Context(key1, value1))
+     *         .getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1188,7 +1445,11 @@ public Response setTagsWithResponse(BlobSetTagsOptions options, Duration t * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot} + * + *
+     * System.out.printf("Identifier for the snapshot is %s%n", client.createSnapshot().getSnapshotId());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1207,7 +1468,16 @@ public BlobClientBase createSnapshot() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse#Map-BlobRequestConditions-Duration-Context} + * + *
+     * Map<String, String> snapshotMetadata = Collections.singletonMap("metadata", "value");
+     * BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     *
+     * System.out.printf("Identifier for the snapshot is %s%n",
+     *     client.createSnapshotWithResponse(snapshotMetadata, requestConditions, timeout,
+     *         new Context(key1, value1)).getValue().getSnapshotId());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1238,7 +1508,12 @@ public Response createSnapshotWithResponse(Map m * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier#AccessTier} + * + *
+     * client.setAccessTier(AccessTier.HOT);
+     * System.out.println("Set tier completed.");
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1258,7 +1533,13 @@ public void setAccessTier(AccessTier tier) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse#AccessTier-RehydratePriority-String-Duration-Context} + * + *
+     * System.out.printf("Set tier completed with status code %d%n",
+     *     client.setAccessTierWithResponse(AccessTier.HOT, RehydratePriority.STANDARD, leaseId, timeout,
+     *         new Context(key2, value2)).getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1285,7 +1566,16 @@ public Response setAccessTierWithResponse(AccessTier tier, RehydratePriori * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse#BlobSetAccessTierOptions-Duration-Context} + * + *
+     * System.out.printf("Set tier completed with status code %d%n",
+     *     client.setAccessTierWithResponse(new BlobSetAccessTierOptions(AccessTier.HOT)
+     *         .setPriority(RehydratePriority.STANDARD)
+     *         .setLeaseId(leaseId)
+     *         .setTagsConditions(tags),
+     *         timeout, new Context(key2, value2)).getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1306,7 +1596,12 @@ public Response setAccessTierWithResponse(BlobSetAccessTierOptions options * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete} + * + *
+     * client.undelete();
+     * System.out.println("Undelete completed");
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1321,7 +1616,12 @@ public void undelete() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse#Duration-Context} + * + *
+     * System.out.printf("Undelete completed with status %d%n", client.undeleteWithResponse(timeout,
+     *     new Context(key1, value1)).getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1342,7 +1642,12 @@ public Response undeleteWithResponse(Duration timeout, Context context) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo} + * + *
+     * StorageAccountInfo accountInfo = client.getAccountInfo();
+     * System.out.printf("Account Kind: %s, SKU: %s%n", accountInfo.getAccountKind(), accountInfo.getSkuName());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1359,7 +1664,12 @@ public StorageAccountInfo getAccountInfo() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse#Duration-Context} + * + *
+     * StorageAccountInfo accountInfo = client.getAccountInfoWithResponse(timeout, new Context(key1, value1)).getValue();
+     * System.out.printf("Account Kind: %s, SKU: %s%n", accountInfo.getAccountKind(), accountInfo.getSkuName());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1381,7 +1691,17 @@ public Response getAccountInfoWithResponse(Duration timeout, * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas#BlobServiceSasSignatureValues-UserDelegationKey} + * + *
+     * OffsetDateTime myExpiryTime = OffsetDateTime.now().plusDays(1);
+     * BlobSasPermission myPermission = new BlobSasPermission().setReadPermission(true);
+     *
+     * BlobServiceSasSignatureValues myValues = new BlobServiceSasSignatureValues(myExpiryTime, myPermission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateUserDelegationSas(myValues, userDelegationKey);
+     * 
+ * * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. @@ -1400,7 +1720,17 @@ public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServic * *
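A minimal sketch of how the user delegation key is typically obtained before calling this method, assuming a BlobServiceClient named serviceClient built with an Azure AD token credential; the variable names are illustrative only:

// Sketch: request a user delegation key from the service, then sign a read-only SAS for this blob.
// Assumes `serviceClient` is a BlobServiceClient authenticated with an Azure AD token credential.
OffsetDateTime keyStart = OffsetDateTime.now();
OffsetDateTime keyExpiry = keyStart.plusDays(1);
UserDelegationKey delegationKey = serviceClient.getUserDelegationKey(keyStart, keyExpiry);

BlobServiceSasSignatureValues sasValues = new BlobServiceSasSignatureValues(keyExpiry,
    new BlobSasPermission().setReadPermission(true));

String sas = client.generateUserDelegationSas(sasValues, delegationKey);
String readOnlyBlobUrl = client.getBlobUrl() + "?" + sas; // usable by a reader until keyExpiry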

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas#BlobServiceSasSignatureValues-UserDelegationKey-String-Context} + * + *
+     * OffsetDateTime myExpiryTime = OffsetDateTime.now().plusDays(1);
+     * BlobSasPermission myPermission = new BlobSasPermission().setReadPermission(true);
+     *
+     * BlobServiceSasSignatureValues myValues = new BlobServiceSasSignatureValues(myExpiryTime, myPermission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateUserDelegationSas(myValues, userDelegationKey, accountName, new Context(key1, value1));
+     * 
+ * * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. @@ -1423,7 +1753,17 @@ public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServic * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas#BlobServiceSasSignatureValues} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * BlobSasPermission permission = new BlobSasPermission().setReadPermission(true);
+     *
+     * BlobServiceSasSignatureValues values = new BlobServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateSas(values); // Client must be authenticated via StorageSharedKeyCredential
+     * 
+ * * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * @@ -1440,7 +1780,18 @@ public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureV * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas#BlobServiceSasSignatureValues-Context} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * BlobSasPermission permission = new BlobSasPermission().setReadPermission(true);
+     *
+     * BlobServiceSasSignatureValues values = new BlobServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * client.generateSas(values, new Context(key1, value1));
+     * 
+ * * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. @@ -1459,7 +1810,13 @@ public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureV * *
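Because the SAS is signed locally with the account key, generateSas only works on a client that holds a StorageSharedKeyCredential. A minimal sketch of building such a client, with placeholder account and blob names:

// Sketch: build a client with a shared key credential so generateSas can sign the SAS locally.
// The account name/key, container name, and blob name below are placeholders.
StorageSharedKeyCredential credential = new StorageSharedKeyCredential("<account-name>", "<account-key>");
BlobClient sasCapableClient = new BlobClientBuilder()
    .endpoint("https://<account-name>.blob.core.windows.net")
    .credential(credential)
    .containerName("my-container")
    .blobName("my-blob.txt")
    .buildClient();

String sasToken = sasCapableClient.generateSas(new BlobServiceSasSignatureValues(
    OffsetDateTime.now().plusHours(1), new BlobSasPermission().setReadPermission(true)));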

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream#String} + * + *
+     * String expression = "SELECT * from BlobStorage";
+     * InputStream inputStream = client.openQueryInputStream(expression);
+     * // Now you can read from the input stream like you would normally.
+     * 
+ * * * @param expression The query expression. * @return An InputStream object that represents the stream to use for reading the query response. @@ -1477,7 +1834,33 @@ public InputStream openQueryInputStream(String expression) { * *
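A minimal sketch of one way to consume the InputStream returned by openQueryInputStream, assuming the query output is UTF-8 text; plain java.io, with IOException propagated to the caller:

// Sketch: drain the query result into a String. Assumes the output serialization is UTF-8 text.
String expression = "SELECT * from BlobStorage";
try (InputStream queryStream = client.openQueryInputStream(expression);
     ByteArrayOutputStream buffer = new ByteArrayOutputStream()) {
    byte[] chunk = new byte[8192];
    int read;
    while ((read = queryStream.read(chunk)) != -1) {
        buffer.write(chunk, 0, read);
    }
    System.out.println(buffer.toString("UTF-8"));
}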

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream#BlobQueryOptions} + * + *
+     * String expression = "SELECT * from BlobStorage";
+     * BlobQuerySerialization input = new BlobQueryDelimitedSerialization()
+     *     .setColumnSeparator(',')
+     *     .setEscapeChar('\0')
+     *     .setRecordSeparator('\n')
+     *     .setHeadersPresent(true)
+     *     .setFieldQuote('"');
+     * BlobQuerySerialization output = new BlobQueryJsonSerialization()
+     *     .setRecordSeparator('\n');
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId("leaseId");
+     * Consumer<BlobQueryError> errorConsumer = System.out::println;
+     * Consumer<BlobQueryProgress> progressConsumer = progress -> System.out.println("total blob bytes read: "
+     *     + progress.getBytesScanned());
+     * BlobQueryOptions queryOptions = new BlobQueryOptions(expression)
+     *     .setInputSerialization(input)
+     *     .setOutputSerialization(output)
+     *     .setRequestConditions(requestConditions)
+     *     .setErrorConsumer(errorConsumer)
+     *     .setProgressConsumer(progressConsumer);
+     *
+     * InputStream inputStream = client.openQueryInputStreamWithResponse(queryOptions).getValue();
+     * // Now you can read from the input stream like you would normally.
+     * 
+ * * * @param queryOptions {@link BlobQueryOptions The query options}. * @return A response containing status code and HTTP headers including an InputStream object @@ -1505,7 +1888,14 @@ public Response openQueryInputStreamWithResponse(BlobQueryOptions q * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query#OutputStream-String} + * + *
+     * ByteArrayOutputStream queryData = new ByteArrayOutputStream();
+     * String expression = "SELECT * from BlobStorage";
+     * client.query(queryData, expression);
+     * System.out.println("Query completed.");
+     * 
+ * * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @param expression The query expression. @@ -1525,7 +1915,33 @@ public void query(OutputStream stream, String expression) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse#BlobQueryOptions-Duration-Context} + * + *
+     * ByteArrayOutputStream queryData = new ByteArrayOutputStream();
+     * String expression = "SELECT * from BlobStorage";
+     * BlobQueryJsonSerialization input = new BlobQueryJsonSerialization()
+     *     .setRecordSeparator('\n');
+     * BlobQueryDelimitedSerialization output = new BlobQueryDelimitedSerialization()
+     *     .setEscapeChar('\0')
+     *     .setColumnSeparator(',')
+     *     .setRecordSeparator('\n')
+     *     .setFieldQuote('\'')
+     *     .setHeadersPresent(true);
+     * BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     * Consumer<BlobQueryError> errorConsumer = System.out::println;
+     * Consumer<BlobQueryProgress> progressConsumer = progress -> System.out.println("total blob bytes read: "
+     *     + progress.getBytesScanned());
+     * BlobQueryOptions queryOptions = new BlobQueryOptions(expression, queryData)
+     *     .setInputSerialization(input)
+     *     .setOutputSerialization(output)
+     *     .setRequestConditions(requestConditions)
+     *     .setErrorConsumer(errorConsumer)
+     *     .setProgressConsumer(progressConsumer);
+     * System.out.printf("Query completed with status %d%n",
+     *     client.queryWithResponse(queryOptions, timeout, new Context(key1, value1))
+     *         .getStatusCode());
+     * 
+ * * * @param queryOptions {@link BlobQueryOptions The query options}. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -1559,7 +1975,15 @@ public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Durati * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicy#BlobImmutabilityPolicy} + * + *
+     * BlobImmutabilityPolicy policy = new BlobImmutabilityPolicy()
+     *     .setPolicyMode(BlobImmutabilityPolicyMode.LOCKED)
+     *     .setExpiryTime(OffsetDateTime.now().plusDays(1));
+     * BlobImmutabilityPolicy setPolicy = client.setImmutabilityPolicy(policy);
+     * System.out.println("Successfully completed setting the immutability policy");
+     * 
+ * * * @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}. * @return The immutability policy. @@ -1576,7 +2000,18 @@ public BlobImmutabilityPolicy setImmutabilityPolicy(BlobImmutabilityPolicy immut * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setImmutabilityPolicyWithResponse#BlobImmutabilityPolicy-BlobRequestConditions-Duration-Context} + * + *
+     * BlobImmutabilityPolicy immutabilityPolicy = new BlobImmutabilityPolicy()
+     *     .setPolicyMode(BlobImmutabilityPolicyMode.LOCKED)
+     *     .setExpiryTime(OffsetDateTime.now().plusDays(1));
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(1));
+     * Response<BlobImmutabilityPolicy> response = client.setImmutabilityPolicyWithResponse(immutabilityPolicy,
+     *     requestConditions, timeout, new Context(key1, value1));
+     * System.out.println("Successfully completed setting the immutability policy");
+     * 
+ * * * @param immutabilityPolicy {@link BlobImmutabilityPolicy The immutability policy}. * @param requestConditions {@link BlobRequestConditions} @@ -1600,7 +2035,12 @@ public Response setImmutabilityPolicyWithResponse(BlobIm * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicy} + * + *
+     * client.deleteImmutabilityPolicy();
+     * System.out.println("Completed immutability policy deletion.");
+     * 
+ * * */ @ServiceMethod(returns = ReturnType.SINGLE) @@ -1615,7 +2055,12 @@ public void deleteImmutabilityPolicy() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteImmutabilityPolicyWithResponse#Duration-Context} + * + *
+     * System.out.println("Delete immutability policy completed with status: "
+     *     + client.deleteImmutabilityPolicyWithResponse(timeout, new Context(key1, value1)).getStatusCode());
+     * 
+ * * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. @@ -1635,7 +2080,11 @@ public Response deleteImmutabilityPolicyWithResponse(Duration timeout, Con * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHold#boolean} + * + *
+     * System.out.println("Legal hold status: " + client.setLegalHold(true));
+     * 
+ * * * @param legalHold Whether or not you want a legal hold on the blob. * @return The legal hold result. @@ -1652,7 +2101,12 @@ public BlobLegalHoldResult setLegalHold(boolean legalHold) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setLegalHoldWithResponse#boolean-Duration-Context} + * + *
+     * System.out.println("Legal hold status: " + client.setLegalHoldWithResponse(true, timeout,
+     *     new Context(key1, value1)));
+     * 
+ * * * @param legalHold Whether or not you want a legal hold on the blob. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlobLeaseAsyncClient.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlobLeaseAsyncClient.java index d233a8a3fad11..ac77d25960bc4 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlobLeaseAsyncClient.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlobLeaseAsyncClient.java @@ -42,9 +42,21 @@ * *

Instantiating a BlobLeaseAsyncClient

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseClientBuilder.asyncInstantiationWithBlob} + * + *
+ * BlobLeaseAsyncClient blobLeaseAsyncClient = new BlobLeaseClientBuilder()
+ *     .blobAsyncClient(blobAsyncClient)
+ *     .buildAsyncClient();
+ * 
+ * * - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseClientBuilder.asyncInstantiationWithContainer} + * + *
+ * BlobLeaseAsyncClient blobLeaseAsyncClient = new BlobLeaseClientBuilder()
+ *     .containerAsyncClient(blobContainerAsyncClient)
+ *     .buildAsyncClient();
+ * 
+ * * *

View {@link BlobLeaseClientBuilder this} for additional ways to construct the client.

* @@ -110,7 +122,11 @@ public String getLeaseId() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseAsyncClient.acquireLease#int} + * + *
+     * client.acquireLease(60).subscribe(response -> System.out.printf("Lease ID is %s%n", response));
+     * 
+ * * * @param duration The duration of the lease between 15 to 60 seconds or -1 for an infinite duration. * @return A reactive response containing the lease ID. @@ -130,7 +146,15 @@ public Mono acquireLease(int duration) { * *
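A minimal sketch of acquiring an infinite lease (duration -1) and releasing it in a single reactive chain, assuming the surrounding sample's BlobLeaseAsyncClient; purely illustrative:

// Sketch: take an infinite lease (-1), do exclusive work, then release it.
client.acquireLease(-1)
    .doOnNext(leaseId -> System.out.printf("Acquired infinite lease %s%n", leaseId))
    .flatMap(leaseId -> client.releaseLease())
    .subscribe(ignored -> { },
        error -> System.out.println("Lease operation failed: " + error),
        () -> System.out.println("Lease released"));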

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseAsyncClient.acquireLeaseWithResponse#int-RequestConditions} + * + *
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfModifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.acquireLeaseWithResponse(60, modifiedRequestConditions).subscribe(response ->
+     *     System.out.printf("Lease ID is %s%n", response.getValue()));
+     * 
+ * * * @param duration The duration of the lease between 15 to 60 seconds or -1 for an infinite duration. * @param modifiedRequestConditions Standard HTTP Access conditions related to the modification of data. ETag and @@ -155,7 +179,18 @@ public Mono> acquireLeaseWithResponse(int duration, RequestCond * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseAsyncClient.acquireLeaseWithResponse#BlobAcquireLeaseOptions} + * + *
+     * BlobLeaseRequestConditions requestConditions = new BlobLeaseRequestConditions()
+     *     .setIfModifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * BlobAcquireLeaseOptions options = new BlobAcquireLeaseOptions(60)
+     *     .setRequestConditions(requestConditions);
+     *
+     * client.acquireLeaseWithResponse(options).subscribe(response ->
+     *     System.out.printf("Lease ID is %s%n", response.getValue()));
+     * 
+ * * * @param options {@link BlobAcquireLeaseOptions} * @return A reactive response containing the lease ID. @@ -200,7 +235,11 @@ Mono> acquireLeaseWithResponse(BlobAcquireLeaseOptions options, * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseAsyncClient.renewLease} + * + *
+     * client.renewLease().subscribe(response -> System.out.printf("Renewed lease ID is %s%n", response));
+     * 
+ * * * @return A reactive response containing the renewed lease ID. */ @@ -218,7 +257,15 @@ public Mono renewLease() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseAsyncClient.renewLeaseWithResponse#RequestConditions} + * + *
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.renewLeaseWithResponse(modifiedRequestConditions).subscribe(response ->
+     *     System.out.printf("Renewed lease ID is %s%n", response.getValue()));
+     * 
+ * * * @param modifiedRequestConditions Standard HTTP Access conditions related to the modification of data. ETag and * LastModifiedTime are used to construct conditions related to when the resource was changed relative to the given @@ -241,7 +288,18 @@ public Mono> renewLeaseWithResponse(RequestConditions modifiedR * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseAsyncClient.renewLeaseWithResponse#BlobRenewLeaseOptions} + * + *
+     * BlobLeaseRequestConditions requestConditions = new BlobLeaseRequestConditions()
+     *     .setIfModifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * BlobRenewLeaseOptions options = new BlobRenewLeaseOptions()
+     *     .setRequestConditions(requestConditions);
+     *
+     * client.renewLeaseWithResponse(options).subscribe(response ->
+     *     System.out.printf("Lease ID is %s%n", response.getValue()));
+     * 
+ * * * @param options {@link BlobRenewLeaseOptions} * @return A reactive response containing the renewed lease ID. @@ -285,7 +343,11 @@ Mono> renewLeaseWithResponse(BlobRenewLeaseOptions options, Con * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseAsyncClient.releaseLease} + * + *
+     * client.releaseLease().subscribe(response -> System.out.println("Completed release lease"));
+     * 
+ * * * @return A reactive response signalling completion. */ @@ -303,7 +365,15 @@ public Mono releaseLease() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseAsyncClient.releaseLeaseWithResponse#RequestConditions} + * + *
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.releaseLeaseWithResponse(modifiedRequestConditions).subscribe(response ->
+     *     System.out.printf("Release lease completed with status %d%n", response.getStatusCode()));
+     * 
+ * * * @param modifiedRequestConditions Standard HTTP Access conditions related to the modification of data. ETag and * LastModifiedTime are used to construct conditions related to when the resource was changed relative to the given @@ -326,7 +396,18 @@ public Mono> releaseLeaseWithResponse(RequestConditions modifiedR * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseAsyncClient.releaseLeaseWithResponse#BlobReleaseLeaseOptions} + * + *
+     * BlobLeaseRequestConditions requestConditions = new BlobLeaseRequestConditions()
+     *     .setIfModifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * BlobReleaseLeaseOptions options = new BlobReleaseLeaseOptions()
+     *     .setRequestConditions(requestConditions);
+     *
+     * client.releaseLeaseWithResponse(options).subscribe(response ->
+     *     System.out.printf("Release lease completed with status %d%n", response.getStatusCode()));
+     * 
+ * * * @param options {@link BlobReleaseLeaseOptions} * @return A reactive response signalling completion. @@ -366,7 +447,12 @@ Mono> releaseLeaseWithResponse(BlobReleaseLeaseOptions options, C * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseAsyncClient.breakLease} + * + *
+     * client.breakLease().subscribe(response ->
+     *     System.out.printf("The broken lease has %d seconds remaining on the lease", response));
+     * 
+ * * * @return A reactive response containing the remaining time in the broken lease in seconds. */ @@ -387,7 +473,16 @@ public Mono breakLease() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseAsyncClient.breakLeaseWithResponse#Integer-RequestConditions} + * + *
+     * Integer retainLeaseInSeconds = 5;
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.breakLeaseWithResponse(retainLeaseInSeconds, modifiedRequestConditions).subscribe(response ->
+     *     System.out.printf("The broken lease has %d seconds remaining on the lease", response.getValue()));
+     * 
+ * * * @param breakPeriodInSeconds An optional duration, between 0 and 60 seconds, that the lease should continue before * it is broken. If the break period is longer than the time remaining on the lease the remaining time on the lease @@ -419,7 +514,20 @@ public Mono> breakLeaseWithResponse(Integer breakPeriodInSecon * *
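A minimal sketch of breaking a lease with no grace period, passing a break period of 0 so the reported remaining time should be 0 seconds; request conditions are omitted (null) for brevity:

// Sketch: break the lease immediately; the response value is the number of seconds left on the broken lease.
client.breakLeaseWithResponse(0, null).subscribe(response ->
    System.out.printf("Lease broken; %d seconds remaining%n", response.getValue()));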

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseAsyncClient.breakLeaseWithResponse#BlobBreakLeaseOptions} + * + *
+     * Integer retainLeaseInSeconds = 5;
+     * BlobLeaseRequestConditions requestConditions = new BlobLeaseRequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * BlobBreakLeaseOptions options = new BlobBreakLeaseOptions()
+     *     .setBreakPeriod(Duration.ofSeconds(retainLeaseInSeconds))
+     *     .setRequestConditions(requestConditions);
+     *
+     * client.breakLeaseWithResponse(options).subscribe(response ->
+     *     System.out.printf("The broken lease has %d seconds remaining on the lease", response.getValue()));
+     * 
+ * * * @param options {@link BlobBreakLeaseOptions} * @return A reactive response containing the remaining time in the broken lease in seconds. @@ -461,7 +569,11 @@ Mono> breakLeaseWithResponse(BlobBreakLeaseOptions options, Co * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseAsyncClient.changeLease#String} + * + *
+     * client.changeLease("proposedId").subscribe(response -> System.out.printf("Changed lease ID is %s%n", response));
+     * 
+ * * * @param proposedId A new lease ID in a valid GUID format. * @return A reactive response containing the new lease ID. @@ -480,7 +592,15 @@ public Mono changeLease(String proposedId) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseAsyncClient.changeLeaseWithResponse#String-RequestConditions} + * + *
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.changeLeaseWithResponse("proposedId", modifiedRequestConditions).subscribe(response ->
+     *     System.out.printf("Changed lease ID is %s%n", response.getValue()));
+     * 
+ * * * @param proposedId A new lease ID in a valid GUID format. * @param modifiedRequestConditions Standard HTTP Access conditions related to the modification of data. ETag and @@ -505,7 +625,18 @@ public Mono> changeLeaseWithResponse(String proposedId, * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseAsyncClient.changeLeaseWithResponse#BlobChangeLeaseOptions} + * + *
+     * BlobLeaseRequestConditions requestConditions = new BlobLeaseRequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * BlobChangeLeaseOptions options = new BlobChangeLeaseOptions("proposedId")
+     *     .setRequestConditions(requestConditions);
+     *
+     * client.changeLeaseWithResponse(options).subscribe(response ->
+     *     System.out.printf("Changed lease ID is %s%n", response.getValue()));
+     * 
+ * * * @param options {@link BlobChangeLeaseOptions} * @return A reactive response containing the new lease ID. diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlobLeaseClient.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlobLeaseClient.java index ff70e9162f369..2a4eeff38e4d4 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlobLeaseClient.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlobLeaseClient.java @@ -28,9 +28,21 @@ * *

Instantiating a BlobLeaseClient

* - * {@codesnippet com.azure.storage.blob.specialized.LeaseClientBuilder.syncInstantiationWithBlob} + * + *
+ * BlobLeaseClient blobLeaseClient = new BlobLeaseClientBuilder()
+ *     .blobClient(blobClient)
+ *     .buildClient();
+ * 
+ * * - * {@codesnippet com.azure.storage.blob.specialized.LeaseClientBuilder.syncInstantiationWithContainer} + * + *
+ * BlobLeaseClient blobLeaseClient = new BlobLeaseClientBuilder()
+ *     .containerClient(blobContainerClient)
+ *     .buildClient();
+ * 
+ * * *

View {@link BlobLeaseClientBuilder this} for additional ways to construct the client.

* @@ -74,7 +86,11 @@ public String getLeaseId() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseClient.acquireLease#int} + * + *
+     * System.out.printf("Lease ID is %s%n", client.acquireLease(60));
+     * 
+ * * * @param duration The duration of the lease between 15 to 60 seconds or -1 for an infinite duration. * @return The lease ID. @@ -90,7 +106,16 @@ public String acquireLease(int duration) { * *
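A minimal sketch of a common synchronous pattern: holding a short lease for the duration of an exclusive operation. The 15-second duration is the minimum the service accepts; the work inside the try block is a placeholder:

// Sketch: hold a 15-second lease while doing exclusive work, then release it.
String leaseId = client.acquireLease(15);
try {
    System.out.printf("Working under lease %s%n", leaseId);
    // ... operations that require exclusive access to the blob ...
} finally {
    client.releaseLease();
}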

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseClient.acquireLeaseWithResponse#int-RequestConditions-Duration-Context} + * + *
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfModifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * System.out.printf("Lease ID is %s%n", client
+     *     .acquireLeaseWithResponse(60, modifiedRequestConditions, timeout, new Context(key, value))
+     *     .getValue());
+     * 
+ * * * @param duration The duration of the lease between 15 to 60 seconds or -1 for an infinite duration. * @param modifiedRequestConditions Standard HTTP Access conditions related to the modification of data. ETag and @@ -114,7 +139,19 @@ public Response acquireLeaseWithResponse(int duration, RequestConditions * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseClient.acquireLeaseWithResponse#BlobAcquireLeaseOptions-Duration-Context} + * + *
+     * BlobLeaseRequestConditions requestConditions = new BlobLeaseRequestConditions()
+     *     .setIfModifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * BlobAcquireLeaseOptions options = new BlobAcquireLeaseOptions(60)
+     *     .setRequestConditions(requestConditions);
+     *
+     * System.out.printf("Lease ID is %s%n", client
+     *     .acquireLeaseWithResponse(options, timeout, new Context(key, value))
+     *     .getValue());
+     * 
+ * * * @param options {@link BlobAcquireLeaseOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -133,7 +170,11 @@ public Response acquireLeaseWithResponse(BlobAcquireLeaseOptions options * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseClient.renewLease} + * + *
+     * System.out.printf("Renewed lease ID is %s%n", client.renewLease());
+     * 
+ * * * @return The renewed lease ID. */ @@ -147,7 +188,16 @@ public String renewLease() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseClient.renewLeaseWithResponse#RequestConditions-Duration-Context} + * + *
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * System.out.printf("Renewed lease ID is %s%n",
+     *     client.renewLeaseWithResponse(modifiedRequestConditions, timeout, new Context(key, value))
+     *         .getValue());
+     * 
+ * * * @param modifiedRequestConditions Standard HTTP Access conditions related to the modification of data. ETag and * LastModifiedTime are used to construct conditions related to when the resource was changed relative to the given @@ -169,7 +219,19 @@ public Response renewLeaseWithResponse(RequestConditions modifiedRequest * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseClient.renewLeaseWithResponse#BlobRenewLeaseOptions-Duration-Context} + * + *
+     * BlobLeaseRequestConditions requestConditions = new BlobLeaseRequestConditions()
+     *     .setIfModifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * BlobRenewLeaseOptions options = new BlobRenewLeaseOptions()
+     *     .setRequestConditions(requestConditions);
+     *
+     * System.out.printf("Renewed lease ID is %s%n",
+     *     client.renewLeaseWithResponse(options, timeout, new Context(key, value))
+     *         .getValue());
+     * 
+ * * * @param options {@link BlobRenewLeaseOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -187,7 +249,12 @@ public Response renewLeaseWithResponse(BlobRenewLeaseOptions options, Du * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseClient.releaseLease} + * + *
+     * client.releaseLease();
+     * System.out.println("Release lease completed");
+     * 
+ * */ @ServiceMethod(returns = ReturnType.SINGLE) public void releaseLease() { @@ -199,7 +266,16 @@ public void releaseLease() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseClient.releaseLeaseWithResponse#RequestConditions-Duration-Context} + * + *
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * System.out.printf("Release lease completed with status %d%n",
+     *     client.releaseLeaseWithResponse(modifiedRequestConditions, timeout, new Context(key, value))
+     *         .getStatusCode());
+     * 
+ * * * @param modifiedRequestConditions Standard HTTP Access conditions related to the modification of data. ETag and * LastModifiedTime are used to construct conditions related to when the resource was changed relative to the given @@ -221,7 +297,19 @@ public Response releaseLeaseWithResponse(RequestConditions modifiedRequest * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseClient.releaseLeaseWithResponse#BlobReleaseLeaseOptions-Duration-Context} + * + *
+     * BlobLeaseRequestConditions requestConditions = new BlobLeaseRequestConditions()
+     *     .setIfModifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * BlobReleaseLeaseOptions options = new BlobReleaseLeaseOptions()
+     *     .setRequestConditions(requestConditions);
+     *
+     * System.out.printf("Release lease completed with status %d%n",
+     *     client.releaseLeaseWithResponse(options, timeout, new Context(key, value))
+     *         .getStatusCode());
+     * 
+ * * * @param options {@link BlobReleaseLeaseOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -240,7 +328,11 @@ public Response releaseLeaseWithResponse(BlobReleaseLeaseOptions options, * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseClient.breakLease} + * + *
+     * System.out.printf("The broken lease has %d seconds remaining on the lease", client.breakLease());
+     * 
+ * * * @return The remaining time in the broken lease in seconds. */ @@ -257,7 +349,17 @@ public Integer breakLease() { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseClient.breakLeaseWithResponse#Integer-RequestConditions-Duration-Context} + * + *
+     * Integer retainLeaseInSeconds = 5;
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * System.out.printf("The broken lease has %d seconds remaining on the lease", client
+     *     .breakLeaseWithResponse(retainLeaseInSeconds, modifiedRequestConditions, timeout, new Context(key, value))
+     *     .getValue());
+     * 
+ * * * @param breakPeriodInSeconds An optional duration, between 0 and 60 seconds, that the lease should continue before * it is broken. If the break period is longer than the time remaining on the lease the remaining time on the lease @@ -287,7 +389,21 @@ public Response breakLeaseWithResponse(Integer breakPeriodInSeconds, * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseClient.breakLeaseWithResponse#BlobBreakLeaseOptions-Duration-Context} + * + *
+     * Integer retainLeaseInSeconds = 5;
+     * BlobLeaseRequestConditions requestConditions = new BlobLeaseRequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * BlobBreakLeaseOptions options = new BlobBreakLeaseOptions()
+     *     .setBreakPeriod(Duration.ofSeconds(retainLeaseInSeconds))
+     *     .setRequestConditions(requestConditions);
+     *
+     * System.out.printf("The broken lease has %d seconds remaining on the lease", client
+     *     .breakLeaseWithResponse(options, timeout, new Context(key, value))
+     *     .getValue());
+     * 
+ * * * @param options {@link BlobBreakLeaseOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -304,7 +420,11 @@ public Response breakLeaseWithResponse(BlobBreakLeaseOptions options, D * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseClient.changeLease#String} + * + *
+     * System.out.printf("Changed lease ID is %s%n", client.changeLease("proposedId"));
+     * 
+ * * * @param proposedId A new lease ID in a valid GUID format. * @return The new lease ID. @@ -319,7 +439,16 @@ public String changeLease(String proposedId) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseClient.changeLeaseWithResponse#String-RequestConditions-Duration-Context} + * + *
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * System.out.printf("Changed lease ID is %s%n",
+     *     client.changeLeaseWithResponse("proposedId", modifiedRequestConditions, timeout, new Context(key, value))
+     *         .getValue());
+     * 
+ * * * @param proposedId A new lease ID in a valid GUID format. * @param modifiedRequestConditions Standard HTTP Access conditions related to the modification of data. ETag and @@ -342,7 +471,19 @@ public Response changeLeaseWithResponse(String proposedId, * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseClient.changeLeaseWithResponse#BlobChangeLeaseOptions-Duration-Context} + * + *
+     * BlobLeaseRequestConditions requestConditions = new BlobLeaseRequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * BlobChangeLeaseOptions options = new BlobChangeLeaseOptions("proposedId")
+     *     .setRequestConditions(requestConditions);
+     *
+     * System.out.printf("Changed lease ID is %s%n",
+     *     client.changeLeaseWithResponse(options, timeout, new Context(key, value))
+     *         .getValue());
+     * 
+ * * * @param options {@link BlobChangeLeaseOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlobLeaseClientBuilder.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlobLeaseClientBuilder.java index dda6052f40bd0..b4ffa33271398 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlobLeaseClientBuilder.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlobLeaseClientBuilder.java @@ -27,15 +27,43 @@ * *

Instantiating LeaseClients

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseClientBuilder.syncInstantiationWithBlobAndLeaseId} + * + *
+ * BlobLeaseClient blobLeaseClient = new BlobLeaseClientBuilder()
+ *     .blobClient(blobClient)
+ *     .leaseId(leaseId)
+ *     .buildClient();
+ * 
+ * * - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseClientBuilder.syncInstantiationWithContainerAndLeaseId} + * + *
+ * BlobLeaseClient blobLeaseClient = new BlobLeaseClientBuilder()
+ *     .containerClient(blobContainerClient)
+ *     .leaseId(leaseId)
+ *     .buildClient();
+ * 
+ * * *

Instantiating LeaseAsyncClients

* - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseClientBuilder.asyncInstantiationWithBlobAndLeaseId} + * + *
+ * BlobLeaseAsyncClient blobLeaseAsyncClient = new BlobLeaseClientBuilder()
+ *     .blobAsyncClient(blobAsyncClient)
+ *     .leaseId(leaseId)
+ *     .buildAsyncClient();
+ * 
+ * * - * {@codesnippet com.azure.storage.blob.specialized.BlobLeaseClientBuilder.asyncInstantiationWithContainerAndLeaseId} + * + *
+ * BlobLeaseAsyncClient blobLeaseAsyncClient = new BlobLeaseClientBuilder()
+ *     .containerAsyncClient(blobContainerAsyncClient)
+ *     .leaseId(leaseId)
+ *     .buildAsyncClient();
+ * 
+ * * * @see BlobLeaseClient * @see BlobLeaseAsyncClient diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlockBlobAsyncClient.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlockBlobAsyncClient.java index b74e7be2e755a..4af4d8ec586ce 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlockBlobAsyncClient.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlockBlobAsyncClient.java @@ -173,7 +173,13 @@ public BlockBlobAsyncClient getCustomerProvidedKeyAsyncClient(CustomerProvidedKe * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlockBlobAsyncClient.upload#Flux-long} + * + *
+     * client.upload(data, length).subscribe(response ->
+     *     System.out.printf("Uploaded BlockBlob MD5 is %s%n",
+     *         Base64.getEncoder().encodeToString(response.getContentMd5())));
+     * 
+ * * * @param data The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled * (the default). In other words, the Flux must produce the same data each time it is subscribed to. @@ -202,7 +208,14 @@ public Mono upload(Flux data, long length) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlockBlobAsyncClient.upload#Flux-long-boolean} + * + *
+     * boolean overwrite = false; // Default behavior
+     * client.upload(data, length, overwrite).subscribe(response ->
+     *     System.out.printf("Uploaded BlockBlob MD5 is %s%n",
+     *         Base64.getEncoder().encodeToString(response.getContentMd5())));
+     * 
+ * * * @param data The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled * (the default). In other words, the Flux must produce the same data each time it is subscribed to. @@ -240,7 +253,24 @@ public Mono upload(Flux data, long length, boolean ov * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlockBlobAsyncClient.uploadWithResponse#Flux-long-BlobHttpHeaders-Map-AccessTier-byte-BlobRequestConditions} + * + *
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * byte[] md5 = MessageDigest.getInstance("MD5").digest("data".getBytes(StandardCharsets.UTF_8));
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.uploadWithResponse(data, length, headers, metadata, AccessTier.HOT, md5, requestConditions)
+     *     .subscribe(response -> System.out.printf("Uploaded BlockBlob MD5 is %s%n",
+     *         Base64.getEncoder().encodeToString(response.getValue().getContentMd5())));
+     * 
+ * * * @param data The data to write to the blob. Note that this {@code Flux} must be replayable if retries are enabled * (the default). In other words, the Flux must produce the same data each time it is subscribed to. @@ -279,7 +309,27 @@ public Mono> uploadWithResponse(Flux data, l * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlockBlobAsyncClient.uploadWithResponse#BlockBlobSimpleUploadOptions} + * + *
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * Map<String, String> tags = Collections.singletonMap("tag", "value");
+     * byte[] md5 = MessageDigest.getInstance("MD5").digest("data".getBytes(StandardCharsets.UTF_8));
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.uploadWithResponse(new BlockBlobSimpleUploadOptions(data, length).setHeaders(headers)
+     *     .setMetadata(metadata).setTags(tags).setTier(AccessTier.HOT).setContentMd5(md5)
+     *     .setRequestConditions(requestConditions))
+     *     .subscribe(response -> System.out.printf("Uploaded BlockBlob MD5 is %s%n",
+     *         Base64.getEncoder().encodeToString(response.getValue().getContentMd5())));
+     * 
+ * * * @param options {@link BlockBlobSimpleUploadOptions} * @return A reactive response containing the information of the uploaded block blob. @@ -332,7 +382,14 @@ Mono> uploadWithResponse(BlockBlobSimpleUploadOptions op * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlockBlobAsyncClient.uploadFromUrl#String} + * + *
+     * client.uploadFromUrl(sourceUrl)
+     *     .subscribe(response ->
+     *         System.out.printf("Uploaded BlockBlob from URL, MD5 is %s%n",
+     *             Base64.getEncoder().encodeToString(response.getContentMd5())));
+     * 
+ * * * @param sourceUrl The source URL to upload from. * @return A reactive response containing the information of the uploaded block blob. @@ -356,7 +413,14 @@ public Mono uploadFromUrl(String sourceUrl) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlockBlobAsyncClient.uploadFromUrl#String-boolean} + * + *
+     * boolean overwrite = false; // Default behavior
+     * client.uploadFromUrl(sourceUrl, overwrite).subscribe(response ->
+     *     System.out.printf("Uploaded BlockBlob from URL, MD5 is %s%n",
+     *         Base64.getEncoder().encodeToString(response.getContentMd5())));
+     * 
+ * * * @param sourceUrl The source URL to upload from. * @param overwrite Whether or not to overwrite, should data exist on the blob. @@ -389,7 +453,27 @@ public Mono uploadFromUrl(String sourceUrl, boolean overwrite) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlockBlobAsyncClient.uploadFromUrlWithResponse#BlobUploadFromUrlOptions} + * + *
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * Map<String, String> tags = Collections.singletonMap("tag", "value");
+     * byte[] md5 = MessageDigest.getInstance("MD5").digest("data".getBytes(StandardCharsets.UTF_8));
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.uploadFromUrlWithResponse(new BlobUploadFromUrlOptions(sourceUrl).setHeaders(headers)
+     *     .setTags(tags).setTier(AccessTier.HOT).setContentMd5(md5)
+     *     .setDestinationRequestConditions(requestConditions))
+     *     .subscribe(response -> System.out.printf("Uploaded BlockBlob from URL, MD5 is %s%n",
+     *         Base64.getEncoder().encodeToString(response.getValue().getContentMd5())));
+     * 
+ * * * @param options {@link BlobUploadFromUrlOptions} * @return A reactive response containing the information of the uploaded block blob. @@ -462,7 +546,14 @@ Mono> uploadFromUrlWithResponse(BlobUploadFromUrlOptions * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlockBlobAsyncClient.stageBlock#String-Flux-long} + * + *
+     * client.stageBlock(base64BlockID, data, length)
+     *     .subscribe(
+     *         response -> System.out.println("Staging block completed"),
+     *         error -> System.out.printf("Error when calling stage Block: %s", error));
+     * 
+ * */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono stageBlock(String base64BlockId, Flux data, long length) { @@ -483,7 +574,12 @@ public Mono stageBlock(String base64BlockId, Flux data, long l * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlockBlobAsyncClient.stageBlockWithResponse#String-Flux-long-byte-String} + * + *
+     * client.stageBlockWithResponse(base64BlockID, data, length, md5, leaseId).subscribe(response ->
+     *     System.out.printf("Staging block completed with status %d%n", response.getStatusCode()));
+     * 
+ * * * @param base64BlockId A Base64 encoded {@code String} that specifies the ID for this block. Note that all block * ids for a given blob must be the same length. @@ -526,7 +622,14 @@ base64BlockId, length, data, contentMd5, null, null, leaseId, null, getCustomerP * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlockBlobAsyncClient.stageBlockFromUrl#String-String-BlobRange} + * + *
+     * client.stageBlockFromUrl(base64BlockID, sourceUrl, new BlobRange(offset, count))
+     *     .subscribe(
+     *         response -> System.out.println("Staging block completed"),
+     *         error -> System.out.printf("Error when calling stage Block: %s", error));
+     * 
+ * * * @param base64BlockId A Base64 encoded {@code String} that specifies the ID for this block. Note that all block * ids for a given blob must be the same length. @@ -555,7 +658,16 @@ public Mono stageBlockFromUrl(String base64BlockId, String sourceUrl, Blob * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlockBlobAsyncClient.stageBlockFromUrlWithResponse#String-String-BlobRange-byte-String-BlobRequestConditions} + * + *
+     * BlobRequestConditions sourceRequestConditions = new BlobRequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.stageBlockFromUrlWithResponse(base64BlockID, sourceUrl, new BlobRange(offset, count), null,
+     *     leaseId, sourceRequestConditions).subscribe(response ->
+     *     System.out.printf("Staging block from URL completed with status %d%n", response.getStatusCode()));
+     * 
+ * * * @param base64BlockId A Base64 encoded {@code String} that specifies the ID for this block. Note that all block * ids for a given blob must be the same length. @@ -591,7 +703,17 @@ public Mono> stageBlockFromUrlWithResponse(String base64BlockId, * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlockBlobAsyncClient.stageBlockFromUrlWithResponse#BlockBlobStageBlockFromUrlOptions} + * + *
+     * BlobRequestConditions sourceRequestConditions = new BlobRequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.stageBlockFromUrlWithResponse(new BlockBlobStageBlockFromUrlOptions(base64BlockID, sourceUrl)
+     *     .setSourceRange(new BlobRange(offset, count)).setLeaseId(leaseId)
+     *     .setSourceRequestConditions(sourceRequestConditions)).subscribe(response ->
+     *     System.out.printf("Staging block from URL completed with status %d%n", response.getStatusCode()));
+     * 
+ * * * @param options parameters for the operation. * @return A reactive response signalling completion. @@ -635,7 +757,17 @@ Mono> stageBlockFromUrlWithResponse(BlockBlobStageBlockFromUrlOpt * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlockBlobAsyncClient.listBlocks#BlockListType} + * + *
+     * client.listBlocks(BlockListType.ALL).subscribe(block -> {
+     *     System.out.println("Committed Blocks:");
+     *     block.getCommittedBlocks().forEach(b -> System.out.printf("Name: %s, Size: %d", b.getName(), b.getSizeLong()));
+     *
+     *     System.out.println("Uncommitted Blocks:");
+     *     block.getUncommittedBlocks().forEach(b -> System.out.printf("Name: %s, Size: %d", b.getName(), b.getSizeLong()));
+     * });
+     * </pre>
     *
     * @param listType Specifies which type of blocks to return.
     *
@@ -658,7 +790,18 @@ public Mono<BlockList> listBlocks(BlockListType listType) {
     *
     * <p><strong>Code Samples</strong></p>
     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobAsyncClient.listBlocksWithResponse#BlockListType-String}
+     * <pre>
+     * client.listBlocksWithResponse(BlockListType.ALL, leaseId).subscribe(response -> {
+     *     BlockList block = response.getValue();
+     *     System.out.println("Committed Blocks:");
+     *     block.getCommittedBlocks().forEach(b -> System.out.printf("Name: %s, Size: %d", b.getName(), b.getSizeLong()));
+     *
+     *     System.out.println("Uncommitted Blocks:");
+     *     block.getUncommittedBlocks().forEach(b -> System.out.printf("Name: %s, Size: %d", b.getName(), b.getSizeLong()));
+     * });
+     * </pre>
     *
     * @param listType Specifies which type of blocks to return.
     * @param leaseId The lease ID the active lease on the blob must match.
@@ -681,7 +824,22 @@ public Mono<Response<BlockList>> listBlocksWithResponse(BlockListType listType,
     *
     * <p><strong>Code Samples</strong></p>
     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobAsyncClient.listBlocksWithResponse#BlockBlobListBlocksOptions}
+     * <pre>
+     * client.listBlocksWithResponse(new BlockBlobListBlocksOptions(BlockListType.ALL)
+     *     .setLeaseId(leaseId)
+     *     .setIfTagsMatch(tags)).subscribe(response -> {
+     *         BlockList block = response.getValue();
+     *         System.out.println("Committed Blocks:");
+     *         block.getCommittedBlocks().forEach(b -> System.out.printf("Name: %s, Size: %d", b.getName(),
+     *             b.getSizeLong()));
+     *
+     *         System.out.println("Uncommitted Blocks:");
+     *         block.getUncommittedBlocks().forEach(b -> System.out.printf("Name: %s, Size: %d", b.getName(),
+     *             b.getSizeLong()));
+     *     });
+     * </pre>
     *
     * @param options {@link BlockBlobListBlocksOptions}
     * @return A reactive response containing the list of blocks.
@@ -714,7 +872,12 @@ Mono<Response<BlockList>> listBlocksWithResponse(BlockBlobListBlocksOptions opti
     *
     * <p><strong>Code Samples</strong></p>
     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobAsyncClient.commitBlockList#List}
+     * <pre>
+     * client.commitBlockList(Collections.singletonList(base64BlockID)).subscribe(response ->
+     *     System.out.printf("Committing block list completed. Last modified: %s%n", response.getLastModified()));
+     * </pre>
     *
     * @param base64BlockIds A list of base64 encoded {@code String}s that specifies the block IDs to be committed.
     * @return A reactive response containing the information of the block blob.
@@ -738,7 +901,13 @@ public Mono<BlockBlobItem> commitBlockList(List<String> base64BlockIds) {
     *
     * <p><strong>Code Samples</strong></p>
     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobAsyncClient.commitBlockList#List-boolean}
+     * <pre>
+     * boolean overwrite = false; // Default behavior
+     * client.commitBlockList(Collections.singletonList(base64BlockID), overwrite).subscribe(response ->
+     *     System.out.printf("Committing block list completed. Last modified: %s%n", response.getLastModified()));
+     * </pre>
     *
     * @param base64BlockIds A list of base64 encoded {@code String}s that specifies the block IDs to be committed.
     * @param overwrite Whether or not to overwrite, should data exist on the blob.
@@ -770,7 +939,22 @@ public Mono<BlockBlobItem> commitBlockList(List<String> base64BlockIds, boolean
     *
     * <p><strong>Code Samples</strong></p>
     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobAsyncClient.commitBlockListWithResponse#List-BlobHttpHeaders-Map-AccessTier-BlobRequestConditions}
+     * <pre>
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * client.commitBlockListWithResponse(Collections.singletonList(base64BlockID), headers, metadata,
+     *     AccessTier.HOT, requestConditions).subscribe(response ->
+     *         System.out.printf("Committing block list completed with status %d%n", response.getStatusCode()));
+     * </pre>
     *
     * @param base64BlockIds A list of base64 encoded {@code String}s that specifies the block IDs to be committed.
     * @param headers {@link BlobHttpHeaders}
@@ -800,7 +984,25 @@ public Mono<Response<BlockBlobItem>> commitBlockListWithResponse(List<String> ba
     *
     * <p><strong>Code Samples</strong></p>
     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobAsyncClient.commitBlockListWithResponse#BlockBlobCommitBlockListOptions}
+     * <pre>
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * Map<String, String> tags = Collections.singletonMap("tag", "value");
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * client.commitBlockListWithResponse(new BlockBlobCommitBlockListOptions(Collections.singletonList(base64BlockID))
+     *     .setHeaders(headers).setMetadata(metadata).setTags(tags).setTier(AccessTier.HOT)
+     *     .setRequestConditions(requestConditions))
+     *     .subscribe(response ->
+     *     System.out.printf("Committing block list completed with status %d%n", response.getStatusCode()));
+     * </pre>
     *
     * @param options {@link BlockBlobCommitBlockListOptions}
     * @return A reactive response containing the information of the block blob.
diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlockBlobClient.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlockBlobClient.java
index d959856ca3203..e2579888c9632 100644
--- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlockBlobClient.java
+++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/BlockBlobClient.java
@@ -242,7 +242,12 @@ private BlobClientBuilder prepareBuilder() {
     *
     * <p><strong>Code Samples</strong></p>
     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobClient.upload#InputStream-long}
+     * <pre>
+     * System.out.printf("Uploaded BlockBlob MD5 is %s%n",
+     *     Base64.getEncoder().encodeToString(client.upload(data, length).getContentMd5()));
+     * 
+ * * * @param data The data to write to the blob. The data must be markable. This is in order to support retries. If * the data is not markable, consider using {@link #getBlobOutputStream()} and writing to the returned OutputStream. @@ -266,7 +271,13 @@ public BlockBlobItem upload(InputStream data, long length) { * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobClient.upload#InputStream-long-boolean}
+     * <pre>
+     * boolean overwrite = false;
+     * System.out.printf("Uploaded BlockBlob MD5 is %s%n",
+     *     Base64.getEncoder().encodeToString(client.upload(data, length, overwrite).getContentMd5()));
+     * 
+ * * * @param data The data to write to the blob. The data must be markable. This is in order to support retries. If * the data is not markable, consider using {@link #getBlobOutputStream()} and writing to the returned OutputStream. @@ -298,7 +309,29 @@ public BlockBlobItem upload(InputStream data, long length, boolean overwrite) { * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobClient.uploadWithResponse#InputStream-long-BlobHttpHeaders-Map-AccessTier-byte-BlobRequestConditions-Duration-Context}
+     * <pre>
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     *
+     * byte[] md5 = MessageDigest.getInstance("MD5").digest("data".getBytes(StandardCharsets.UTF_8));
+     *
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Context context = new Context("key", "value");
+     *
+     * System.out.printf("Uploaded BlockBlob MD5 is %s%n", Base64.getEncoder()
+     *     .encodeToString(client.uploadWithResponse(data, length, headers, metadata, AccessTier.HOT, md5,
+     *         requestConditions, timeout, context)
+     *         .getValue()
+     *         .getContentMd5()));
+     * 
+ * * * @param data The data to write to the blob. The data must be markable. This is in order to support retries. If * the data is not markable, consider using {@link #getBlobOutputStream()} and writing to the returned OutputStream. @@ -343,7 +376,31 @@ public Response uploadWithResponse(InputStream data, long length, * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobClient.uploadWithResponse#BlockBlobSimpleUploadOptions-Duration-Context}
+     * <pre>
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * Map<String, String> tags = Collections.singletonMap("tag", "value");
+     *
+     * byte[] md5 = MessageDigest.getInstance("MD5").digest("data".getBytes(StandardCharsets.UTF_8));
+     *
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Context context = new Context("key", "value");
+     *
+     * System.out.printf("Uploaded BlockBlob MD5 is %s%n", Base64.getEncoder()
+     *     .encodeToString(client.uploadWithResponse(new BlockBlobSimpleUploadOptions(data, length)
+     *         .setHeaders(headers).setMetadata(metadata).setTags(tags).setTier(AccessTier.HOT).setContentMd5(md5)
+     *         .setRequestConditions(requestConditions), timeout, context)
+     *         .getValue()
+     *         .getContentMd5()));
+     * 
+ * * * @param options {@link BlockBlobSimpleUploadOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -377,7 +434,12 @@ public Response uploadWithResponse(BlockBlobSimpleUploadOptions o * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobClient.uploadFromUrl#String}
+     * <pre>
+     * System.out.printf("Uploaded BlockBlob from URL, MD5 is %s%n",
+     *     Base64.getEncoder().encodeToString(client.uploadFromUrl(sourceUrl).getContentMd5()));
+     * 
+ * * * @param sourceUrl The source URL to upload from. * @return The information of the uploaded block blob. @@ -397,7 +459,13 @@ public BlockBlobItem uploadFromUrl(String sourceUrl) { * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobClient.uploadFromUrl#String-boolean}
+     * <pre>
+     * boolean overwrite = false;
+     * System.out.printf("Uploaded BlockBlob from URL, MD5 is %s%n",
+     *     Base64.getEncoder().encodeToString(client.uploadFromUrl(sourceUrl, overwrite).getContentMd5()));
+     * 
+ * * * @param sourceUrl The source URL to upload from. * @param overwrite Whether or not to overwrite, should data exist on the blob. @@ -427,7 +495,31 @@ public BlockBlobItem uploadFromUrl(String sourceUrl, boolean overwrite) { * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobClient.uploadFromUrlWithResponse#BlobUploadFromUrlOptions-Duration-Context}
+     * <pre>
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * Map<String, String> tags = Collections.singletonMap("tag", "value");
+     *
+     * byte[] md5 = MessageDigest.getInstance("MD5").digest("data".getBytes(StandardCharsets.UTF_8));
+     *
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Context context = new Context("key", "value");
+     *
+     * System.out.printf("Uploaded BlockBlob MD5 is %s%n", Base64.getEncoder()
+     *     .encodeToString(client.uploadFromUrlWithResponse(new BlobUploadFromUrlOptions(sourceUrl)
+     *         .setHeaders(headers).setTags(tags).setTier(AccessTier.HOT).setContentMd5(md5)
+     *         .setDestinationRequestConditions(requestConditions), timeout, context)
+     *         .getValue()
+     *         .getContentMd5()));
+     * 
+ * * * @param options {@link BlobUploadFromUrlOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -453,7 +545,11 @@ public Response uploadFromUrlWithResponse(BlobUploadFromUrlOption * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobClient.stageBlock#String-InputStream-long}
+     * <pre>
+     * client.stageBlock(base64BlockId, data, length);
+     * 
+ * * * @param base64BlockId A Base64 encoded {@code String} that specifies the ID for this block. Note that all block * ids for a given blob must be the same length. @@ -475,7 +571,13 @@ public void stageBlock(String base64BlockId, InputStream data, long length) { * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobClient.stageBlockWithResponse#String-InputStream-long-byte-String-Duration-Context}
+     * <pre>
+     * Context context = new Context("key", "value");
+     * System.out.printf("Staging block completed with status %d%n",
+     *     client.stageBlockWithResponse(base64BlockId, data, length, md5, leaseId, timeout, context).getStatusCode());
+     * 
+ * * * @param base64BlockId A Base64 encoded {@code String} that specifies the ID for this block. Note that all block * ids for a given blob must be the same length. @@ -516,7 +618,11 @@ public Response stageBlockWithResponse(String base64BlockId, InputStream d * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobClient.stageBlockFromUrl#String-String-BlobRange}
+     * <pre>
+     * client.stageBlockFromUrl(base64BlockId, sourceUrl, new BlobRange(offset, count));
+     * 
+ * * * @param base64BlockId A Base64 encoded {@code String} that specifies the ID for this block. Note that all block * ids for a given blob must be the same length. @@ -539,7 +645,17 @@ public void stageBlockFromUrl(String base64BlockId, String sourceUrl, BlobRange * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobClient.stageBlockFromUrlWithResponse#String-String-BlobRange-byte-String-BlobRequestConditions-Duration-Context}
+     * <pre>
+     * BlobRequestConditions sourceRequestConditions = new BlobRequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Context context = new Context("key", "value");
+     *
+     * System.out.printf("Staging block from URL completed with status %d%n",
+     *     client.stageBlockFromUrlWithResponse(base64BlockId, sourceUrl, new BlobRange(offset, count), null,
+     *         leaseId, sourceRequestConditions, timeout, context).getStatusCode());
+     * 
+ * * * @param base64BlockId A Base64 encoded {@code String} that specifies the ID for this block. Note that all block * ids for a given blob must be the same length. @@ -576,7 +692,18 @@ public Response stageBlockFromUrlWithResponse(String base64BlockId, String * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobClient.stageBlockFromUrlWithResponse#BlockBlobStageBlockFromUrlOptions-Duration-Context}
+     * <pre>
+     * BlobRequestConditions sourceRequestConditions = new BlobRequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Context context = new Context("key", "value");
+     *
+     * System.out.printf("Staging block from URL completed with status %d%n",
+     *     client.stageBlockFromUrlWithResponse(new BlockBlobStageBlockFromUrlOptions(base64BlockId, sourceUrl)
+     *         .setSourceRange(new BlobRange(offset, count)).setLeaseId(leaseId)
+     *         .setSourceRequestConditions(sourceRequestConditions), timeout, context).getStatusCode());
+     * 
+ * * * @param options Parameters for the operation * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -599,7 +726,17 @@ public Response stageBlockFromUrlWithResponse(BlockBlobStageBlockFromUrlOp * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobClient.listBlocks#BlockListType}
+     * <pre>
+     * BlockList block = client.listBlocks(BlockListType.ALL);
+     *
+     * System.out.println("Committed Blocks:");
+     * block.getCommittedBlocks().forEach(b -> System.out.printf("Name: %s, Size: %d", b.getName(), b.getSizeLong()));
+     *
+     * System.out.println("Uncommitted Blocks:");
+     * block.getUncommittedBlocks().forEach(b -> System.out.printf("Name: %s, Size: %d", b.getName(), b.getSizeLong()));
+     * 
+ * * * @param listType Specifies which type of blocks to return. * @@ -616,7 +753,18 @@ public BlockList listBlocks(BlockListType listType) { * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobClient.listBlocksWithResponse#BlockListType-String-Duration-Context}
+     * <pre>
+     * Context context = new Context("key", "value");
+     * BlockList block = client.listBlocksWithResponse(BlockListType.ALL, leaseId, timeout, context).getValue();
+     *
+     * System.out.println("Committed Blocks:");
+     * block.getCommittedBlocks().forEach(b -> System.out.printf("Name: %s, Size: %d", b.getName(), b.getSizeLong()));
+     *
+     * System.out.println("Uncommitted Blocks:");
+     * block.getUncommittedBlocks().forEach(b -> System.out.printf("Name: %s, Size: %d", b.getName(), b.getSizeLong()));
+     * 
+ * * * @param listType Specifies which type of blocks to return. * @param leaseId The lease ID the active lease on the blob must match. @@ -637,7 +785,20 @@ public Response listBlocksWithResponse(BlockListType listType, String * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobClient.listBlocksWithResponse#BlockBlobListBlocksOptions-Duration-Context}
+     * <pre>
+     * Context context = new Context("key", "value");
+     * BlockList block = client.listBlocksWithResponse(new BlockBlobListBlocksOptions(BlockListType.ALL)
+     *     .setLeaseId(leaseId)
+     *     .setIfTagsMatch(tags), timeout, context).getValue();
+     *
+     * System.out.println("Committed Blocks:");
+     * block.getCommittedBlocks().forEach(b -> System.out.printf("Name: %s, Size: %d", b.getName(), b.getSizeLong()));
+     *
+     * System.out.println("Uncommitted Blocks:");
+     * block.getUncommittedBlocks().forEach(b -> System.out.printf("Name: %s, Size: %d", b.getName(), b.getSizeLong()));
+     * 
+ * * * @param options {@link BlockBlobListBlocksOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -661,7 +822,12 @@ public Response listBlocksWithResponse(BlockBlobListBlocksOptions opt * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobClient.commitBlockList#List}
+     * <pre>
+     * System.out.printf("Committing block list completed. Last modified: %s%n",
+     *     client.commitBlockList(Collections.singletonList(base64BlockId)).getLastModified());
+     * 
+ * * * @param base64BlockIds A list of base64 encode {@code String}s that specifies the block IDs to be committed. * @return The information of the block blob. @@ -681,7 +847,13 @@ public BlockBlobItem commitBlockList(List base64BlockIds) { * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobClient.commitBlockList#List-boolean}
+     * <pre>
+     * boolean overwrite = false; // Default behavior
+     * System.out.printf("Committing block list completed. Last modified: %s%n",
+     *     client.commitBlockList(Collections.singletonList(base64BlockId), overwrite).getLastModified());
+     * 
+ * * * @param base64BlockIds A list of base64 encode {@code String}s that specifies the block IDs to be committed. * @param overwrite Whether or not to overwrite, should data exist on the blob. @@ -709,7 +881,24 @@ public BlockBlobItem commitBlockList(List base64BlockIds, boolean overwr * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobClient.uploadFromFile#List-BlobHttpHeaders-Map-AccessTier-BlobRequestConditions-Duration-Context}
+     * <pre>
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Context context = new Context("key", "value");
+     *
+     * System.out.printf("Committing block list completed with status %d%n",
+     *     client.commitBlockListWithResponse(Collections.singletonList(base64BlockId), headers, metadata,
+     *         AccessTier.HOT, requestConditions, timeout, context).getStatusCode());
+     * 
+ * * * @param base64BlockIds A list of base64 encode {@code String}s that specifies the block IDs to be committed. * @param headers {@link BlobHttpHeaders} @@ -743,7 +932,28 @@ public Response commitBlockListWithResponse(List base64Bl * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.BlockBlobClient.uploadFromFile#BlockBlobCommitBlockListOptions-Duration-Context}
+     * <pre>
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * Map<String, String> tags = Collections.singletonMap("tag", "value");
+     * BlobRequestConditions requestConditions = new BlobRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Context context = new Context("key", "value");
+     *
+     * System.out.printf("Committing block list completed with status %d%n",
+     *     client.commitBlockListWithResponse(
+     *         new BlockBlobCommitBlockListOptions(Collections.singletonList(base64BlockId)).setHeaders(headers)
+     *             .setMetadata(metadata).setTags(tags).setTier(AccessTier.HOT)
+     *             .setRequestConditions(requestConditions), timeout, context)
+     *         .getStatusCode());
+     * </pre>
     *
     * @param options {@link BlockBlobCommitBlockListOptions options}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/PageBlobAsyncClient.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/PageBlobAsyncClient.java
index 12aef0e1baea5..3929226eadbba 100644
--- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/PageBlobAsyncClient.java
+++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/PageBlobAsyncClient.java
@@ -168,7 +168,12 @@ private static String pageRangeToString(PageRange pageRange) {
     *
     * <p><strong>Code Samples</strong></p>
     *
-     * {@codesnippet com.azure.storage.blob.PageBlobAsyncClient.create#long}
+     * <pre>
+     * client.create(size).subscribe(response -> System.out.printf(
+     *     "Created page blob with sequence number %s%n", response.getBlobSequenceNumber()));
+     * 
+ * * * @param size Specifies the maximum size for the page blob, up to 8 TB. The page blob size must be aligned to a * 512-byte boundary. @@ -191,7 +196,13 @@ public Mono create(long size) { * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.PageBlobAsyncClient.create#long-boolean}
+     * <pre>
+     * boolean overwrite = false; // Default behavior
+     * client.create(size, overwrite).subscribe(response -> System.out.printf(
+     *     "Created page blob with sequence number %s%n", response.getBlobSequenceNumber()));
+     * 
+ * * * @param size Specifies the maximum size for the page blob, up to 8 TB. The page blob size must be aligned to a * 512-byte boundary. @@ -220,7 +231,19 @@ public Mono create(long size, boolean overwrite) { * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobAsyncClient.createWithResponse#long-Long-BlobHttpHeaders-Map-BlobRequestConditions}
+     * <pre>
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     *
+     * client.createWithResponse(size, sequenceNumber, headers, metadata, blobRequestConditions)
+     *     .subscribe(response -> System.out.printf(
+     *         "Created page blob with sequence number %s%n", response.getValue().getBlobSequenceNumber()));
+     *
+     * 
+ * * * @param size Specifies the maximum size for the page blob, up to 8 TB. The page blob size must be aligned to a * 512-byte boundary. @@ -251,7 +274,20 @@ public Mono> createWithResponse(long size, Long sequenceN * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobAsyncClient.createWithResponse#PageBlobCreateOptions}
+     * <pre>
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     *
+     * client.createWithResponse(new PageBlobCreateOptions(size).setSequenceNumber(sequenceNumber)
+     *     .setHeaders(headers).setMetadata(metadata).setTags(tags).setRequestConditions(blobRequestConditions))
+     *     .subscribe(response -> System.out.printf(
+     *         "Created page blob with sequence number %s%n", response.getValue().getBlobSequenceNumber()));
+     *
+     * 
+ * * * @param options {@link PageBlobCreateOptions} * @return A reactive response containing the information of the created page blob. @@ -316,7 +352,16 @@ Mono> createWithResponse(PageBlobCreateOptions options, C * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobAsyncClient.uploadPages#PageRange-Flux}
+     * <pre>
+     * PageRange pageRange = new PageRange()
+     *     .setStart(0)
+     *     .setEnd(511);
+     *
+     * client.uploadPages(pageRange, body).subscribe(response -> System.out.printf(
+     *     "Uploaded page blob with sequence number %s%n", response.getBlobSequenceNumber()));
+     * 
+ * * * @param pageRange A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start * offset must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges @@ -344,7 +389,20 @@ public Mono uploadPages(PageRange pageRange, Flux body * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobAsyncClient.uploadPagesWithResponse#PageRange-Flux-byte-PageBlobRequestConditions}
+     * <pre>
+     * PageRange pageRange = new PageRange()
+     *     .setStart(0)
+     *     .setEnd(511);
+     *
+     * byte[] md5 = MessageDigest.getInstance("MD5").digest("data".getBytes(StandardCharsets.UTF_8));
+     * PageBlobRequestConditions pageBlobRequestConditions = new PageBlobRequestConditions().setLeaseId(leaseId);
+     *
+     * client.uploadPagesWithResponse(pageRange, body, md5, pageBlobRequestConditions)
+     *     .subscribe(response -> System.out.printf(
+     *         "Uploaded page blob with sequence number %s%n", response.getValue().getBlobSequenceNumber()));
+     * 
+ * * * @param pageRange A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start * offset must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges @@ -411,7 +469,17 @@ Mono> uploadPagesWithResponse(PageRange pageRange, FluxCode Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobAsyncClient.uploadPagesFromUrl#PageRange-String-Long}
+     * <pre>
+     * PageRange pageRange = new PageRange()
+     *     .setStart(0)
+     *     .setEnd(511);
+     *
+     * client.uploadPagesFromUrl(pageRange, url, sourceOffset)
+     *     .subscribe(response -> System.out.printf(
+     *         "Uploaded page blob from URL with sequence number %s%n", response.getBlobSequenceNumber()));
+     * 
+ * * * @param range A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start * offset must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges @@ -442,7 +510,23 @@ public Mono uploadPagesFromUrl(PageRange range, String sourceUrl, * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobAsyncClient.uploadPagesFromUrlWithResponse#PageRange-String-Long-byte-PageBlobRequestConditions-BlobRequestConditions}
+     * <pre>
+     * PageRange pageRange = new PageRange()
+     *     .setStart(0)
+     *     .setEnd(511);
+     * InputStream dataStream = new ByteArrayInputStream(data.getBytes(StandardCharsets.UTF_8));
+     * byte[] sourceContentMD5 = new byte[512];
+     * PageBlobRequestConditions pageBlobRequestConditions = new PageBlobRequestConditions().setLeaseId(leaseId);
+     * BlobRequestConditions sourceRequestConditions = new BlobRequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.uploadPagesFromUrlWithResponse(pageRange, url, sourceOffset, sourceContentMD5, pageBlobRequestConditions,
+     *         sourceRequestConditions)
+     *     .subscribe(response -> System.out.printf(
+     *         "Uploaded page blob from URL with sequence number %s%n", response.getValue().getBlobSequenceNumber()));
+     * 
+ * * * @param range The destination {@link PageRange} range. Given that pages must be aligned with 512-byte boundaries, * the start offset must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte @@ -483,7 +567,25 @@ public Mono> uploadPagesFromUrlWithResponse(PageRange ran * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobAsyncClient.uploadPagesFromUrlWithResponse#PageBlobUploadPagesFromUrlOptions}
+     * <pre>
+     * PageRange pageRange = new PageRange()
+     *     .setStart(0)
+     *     .setEnd(511);
+     * InputStream dataStream = new ByteArrayInputStream(data.getBytes(StandardCharsets.UTF_8));
+     * byte[] sourceContentMD5 = new byte[512];
+     * PageBlobRequestConditions pageBlobRequestConditions = new PageBlobRequestConditions().setLeaseId(leaseId);
+     * BlobRequestConditions sourceRequestConditions = new BlobRequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.uploadPagesFromUrlWithResponse(new PageBlobUploadPagesFromUrlOptions(pageRange, url)
+     *     .setSourceOffset(sourceOffset).setSourceContentMd5(sourceContentMD5)
+     *     .setDestinationRequestConditions(pageBlobRequestConditions)
+     *     .setSourceRequestConditions(sourceRequestConditions))
+     *     .subscribe(response -> System.out.printf(
+     *         "Uploaded page blob from URL with sequence number %s%n", response.getValue().getBlobSequenceNumber()));
+     * 
+ * * * @param options Parameters for the operation. * @return A reactive response containing the information of the uploaded pages. @@ -552,7 +654,16 @@ Mono> uploadPagesFromUrlWithResponse(PageBlobUploadPagesF * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobAsyncClient.clearPages#PageRange}
+     * <pre>
+     * PageRange pageRange = new PageRange()
+     *     .setStart(0)
+     *     .setEnd(511);
+     *
+     * client.clearPages(pageRange).subscribe(response -> System.out.printf(
+     *     "Cleared page blob with sequence number %s%n", response.getBlobSequenceNumber()));
+     * 
+ * * * @param pageRange A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start * offset must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges @@ -575,7 +686,18 @@ public Mono clearPages(PageRange pageRange) { * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobAsyncClient.clearPagesWithResponse#PageRange-PageBlobRequestConditions}
+     * <pre>
+     * PageRange pageRange = new PageRange()
+     *     .setStart(0)
+     *     .setEnd(511);
+     * PageBlobRequestConditions pageBlobRequestConditions = new PageBlobRequestConditions().setLeaseId(leaseId);
+     *
+     * client.clearPagesWithResponse(pageRange, pageBlobRequestConditions)
+     *     .subscribe(response -> System.out.printf(
+     *         "Cleared page blob with sequence number %s%n", response.getValue().getBlobSequenceNumber()));
+     * 
+ * * * @param pageRange A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start * offset must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges @@ -633,7 +755,18 @@ Mono> clearPagesWithResponse(PageRange pageRange, * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobAsyncClient.getPageRanges#BlobRange}
+     * <pre>
+     * BlobRange blobRange = new BlobRange(offset);
+     *
+     * client.getPageRanges(blobRange).subscribe(response -> {
+     *     System.out.println("Valid Page Ranges are:");
+     *     for (PageRange pageRange : response.getPageRange()) {
+     *         System.out.printf("Start: %s, End: %s%n", pageRange.getStart(), pageRange.getEnd());
+     *     }
+     * });
+     * 
+ * * * @param blobRange {@link BlobRange} * @@ -654,7 +787,20 @@ public Mono getPageRanges(BlobRange blobRange) { * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobAsyncClient.getPageRangesWithResponse#BlobRange-BlobRequestConditions}
+     * <pre>
+     * BlobRange blobRange = new BlobRange(offset);
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     *
+     * client.getPageRangesWithResponse(blobRange, blobRequestConditions)
+     *     .subscribe(response -> {
+     *         System.out.println("Valid Page Ranges are:");
+     *         for (PageRange pageRange : response.getValue().getPageRange()) {
+     *             System.out.printf("Start: %s, End: %s%n", pageRange.getStart(), pageRange.getEnd());
+     *         }
+     *     });
+     * 
+ * * * @param blobRange {@link BlobRange} * @param requestConditions {@link BlobRequestConditions} @@ -692,7 +838,19 @@ Mono> getPageRangesWithResponse(BlobRange blobRange, BlobRequ * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobAsyncClient.getPageRangesDiff#BlobRange-String}
+     * <pre>
+     * BlobRange blobRange = new BlobRange(offset);
+     * final String prevSnapshot = "previous snapshot";
+     *
+     * client.getPageRangesDiff(blobRange, prevSnapshot).subscribe(response -> {
+     *     System.out.println("Valid Page Ranges are:");
+     *     for (PageRange pageRange : response.getPageRange()) {
+     *         System.out.printf("Start: %s, End: %s%n", pageRange.getStart(), pageRange.getEnd());
+     *     }
+     * });
+     * 
+ * * * @param blobRange {@link BlobRange} * @param prevSnapshot Specifies that the response will contain only pages that were changed between target blob and @@ -717,7 +875,21 @@ public Mono getPageRangesDiff(BlobRange blobRange, String prevSnapshot * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobAsyncClient.getPageRangesDiffWithResponse#BlobRange-String-BlobRequestConditions}
+     * <pre>
+     * BlobRange blobRange = new BlobRange(offset);
+     * final String prevSnapshot = "previous snapshot";
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     *
+     * client.getPageRangesDiffWithResponse(blobRange, prevSnapshot, blobRequestConditions)
+     *     .subscribe(response -> {
+     *         System.out.println("Valid Page Ranges are:");
+     *         for (PageRange pageRange : response.getValue().getPageRange()) {
+     *             System.out.printf("Start: %s, End: %s%n", pageRange.getStart(), pageRange.getEnd());
+     *         }
+     *     });
+     * 
+ * * * @param blobRange {@link BlobRange} * @param prevSnapshot Specifies that the response will contain only pages that were changed between target blob and @@ -747,7 +919,19 @@ public Mono> getPageRangesDiffWithResponse(BlobRange blobRang * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobAsyncClient.getManagedDiskPageRangesDiff#BlobRange-String}
+     * <pre>
+     * BlobRange blobRange = new BlobRange(offset);
+     * final String prevSnapshotUrl = "previous snapshot url";
+     *
+     * client.getManagedDiskPageRangesDiff(blobRange, prevSnapshotUrl).subscribe(response -> {
+     *     System.out.println("Valid Page Ranges are:");
+     *     for (PageRange pageRange : response.getPageRange()) {
+     *         System.out.printf("Start: %s, End: %s%n", pageRange.getStart(), pageRange.getEnd());
+     *     }
+     * });
+     * 
+ * * * @param blobRange {@link BlobRange} * @param prevSnapshotUrl Specifies the URL of a previous snapshot of the target blob. Specifies that the @@ -774,7 +958,21 @@ public Mono getManagedDiskPageRangesDiff(BlobRange blobRange, String p * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobAsyncClient.getManagedDiskPageRangesDiffWithResponse#BlobRange-String-BlobRequestConditions}
+     * <pre>
+     * BlobRange blobRange = new BlobRange(offset);
+     * final String prevSnapshotUrl = "previous snapshot url";
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     *
+     * client.getManagedDiskPageRangesDiffWithResponse(blobRange, prevSnapshotUrl, blobRequestConditions)
+     *     .subscribe(response -> {
+     *         System.out.println("Valid Page Ranges are:");
+     *         for (PageRange pageRange : response.getValue().getPageRange()) {
+     *             System.out.printf("Start: %s, End: %s%n", pageRange.getStart(), pageRange.getEnd());
+     *         }
+     *     });
+     * 
+ * * * @param blobRange {@link BlobRange} * @param prevSnapshotUrl Specifies the URL of a previous snapshot of the target blob. Specifies that the @@ -829,7 +1027,12 @@ Mono> getPageRangesDiffWithResponse(BlobRange blobRange, Stri * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobAsyncClient.resize#long}
+     * <pre>
+     * client.resize(size).subscribe(response -> System.out.printf(
+     *     "Page blob resized with sequence number %s%n", response.getBlobSequenceNumber()));
+     * 
+ * * * @param size Resizes a page blob to the specified size. If the specified value is less than the current size of * the blob, then all pages above the specified value are cleared. @@ -851,7 +1054,15 @@ public Mono resize(long size) { * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobAsyncClient.resizeWithResponse#long-BlobRequestConditions}
+     * <pre>
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     *
+     * client.resizeWithResponse(size, blobRequestConditions)
+     *     .subscribe(response -> System.out.printf(
+     *         "Page blob resized with sequence number %s%n", response.getValue().getBlobSequenceNumber()));
+     * 
+ * * * @param size Resizes a page blob to the specified size. If the specified value is less than the current size of * the blob, then all pages above the specified value are cleared. @@ -901,7 +1112,13 @@ Mono> resizeWithResponse(long size, BlobRequestConditions * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobAsyncClient.updateSequenceNumber#SequenceNumberActionType-Long}
+     * <pre>
+     * client.updateSequenceNumber(SequenceNumberActionType.INCREMENT, size)
+     *     .subscribe(response -> System.out.printf(
+     *         "Page blob updated to sequence number %s%n", response.getBlobSequenceNumber()));
+     * 
+ * * * @param action Indicates how the service should modify the blob's sequence number. * @param sequenceNumber The blob's sequence number. The sequence number is a user-controlled property that you can @@ -924,7 +1141,15 @@ public Mono updateSequenceNumber(SequenceNumberActionType action, * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobAsyncClient.updateSequenceNumberWithResponse#SequenceNumberActionType-Long-BlobRequestConditions}
+     * <pre>
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     *
+     * client.updateSequenceNumberWithResponse(SequenceNumberActionType.INCREMENT, size, blobRequestConditions)
+     *     .subscribe(response -> System.out.printf(
+     *         "Page blob updated to sequence number %s%n", response.getValue().getBlobSequenceNumber()));
+     * 
+ * * * @param action Indicates how the service should modify the blob's sequence number. * @param sequenceNumber The blob's sequence number. The sequence number is a user-controlled property that you can @@ -982,7 +1207,29 @@ Mono> updateSequenceNumberWithResponse(SequenceNumberActi * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobAsyncClient.copyIncremental#String-String}
+     * <pre>
+     * final String snapshot = "copy snapshot";
+     * client.copyIncremental(url, snapshot).subscribe(statusType -> {
+     *     switch (statusType) {
+     *         case SUCCESS:
+     *             System.out.println("Page blob copied successfully");
+     *             break;
+     *         case FAILED:
+     *             System.out.println("Page blob copied failed");
+     *             break;
+     *         case ABORTED:
+     *             System.out.println("Page blob copied aborted");
+     *             break;
+     *         case PENDING:
+     *             System.out.println("Page blob copied pending");
+     *             break;
+     *         default:
+     *             break;
+     *     }
+     * });
+     * 
+ * * * @param source The source page blob. * @param snapshot The snapshot on the copy source. @@ -1010,7 +1257,35 @@ public Mono copyIncremental(String source, String snapshot) { * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobAsyncClient.copyIncrementalWithResponse#String-String-RequestConditions}
+     * <pre>
+     * final String snapshot = "copy snapshot";
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfNoneMatch("snapshotMatch");
+     *
+     * client.copyIncrementalWithResponse(url, snapshot, modifiedRequestConditions)
+     *     .subscribe(response -> {
+     *         CopyStatusType statusType = response.getValue();
+     *
+     *         switch (statusType) {
+     *             case SUCCESS:
+     *                 System.out.println("Page blob copied successfully");
+     *                 break;
+     *             case FAILED:
+     *                 System.out.println("Page blob copied failed");
+     *                 break;
+     *             case ABORTED:
+     *                 System.out.println("Page blob copied aborted");
+     *                 break;
+     *             case PENDING:
+     *                 System.out.println("Page blob copied pending");
+     *                 break;
+     *             default:
+     *                 break;
+     *         }
+     *     });
+     * 
+ * * * @param source The source page blob. * @param snapshot The snapshot on the copy source. @@ -1046,7 +1321,36 @@ public Mono> copyIncrementalWithResponse(String source, * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobAsyncClient.copyIncrementalWithResponse#PageBlobCopyIncrementalOptions}
+     * <pre>
+     * final String snapshot = "copy snapshot";
+     * PageBlobCopyIncrementalRequestConditions destinationRequestConditions = new PageBlobCopyIncrementalRequestConditions()
+     *     .setIfNoneMatch("snapshotMatch");
+     *
+     * client.copyIncrementalWithResponse(new PageBlobCopyIncrementalOptions(url, snapshot)
+     *     .setRequestConditions(destinationRequestConditions))
+     *     .subscribe(response -> {
+     *         CopyStatusType statusType = response.getValue();
+     *
+     *         switch (statusType) {
+     *             case SUCCESS:
+     *                 System.out.println("Page blob copied successfully");
+     *                 break;
+     *             case FAILED:
+     *                 System.out.println("Page blob copied failed");
+     *                 break;
+     *             case ABORTED:
+     *                 System.out.println("Page blob copied aborted");
+     *                 break;
+     *             case PENDING:
+     *                 System.out.println("Page blob copied pending");
+     *                 break;
+     *             default:
+     *                 break;
+     *         }
+     *     });
+     * </pre>
     *
     * @param options {@link PageBlobCopyIncrementalOptions}
     *
diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/PageBlobClient.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/PageBlobClient.java
index b3981c599e5fd..a17e71d40690a 100644
--- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/PageBlobClient.java
+++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/PageBlobClient.java
@@ -133,7 +133,12 @@ public BlobOutputStream getBlobOutputStream(PageRange pageRange, BlobRequestCond
     *
     * <p><strong>Code Samples</strong></p>
     *
-     * {@codesnippet com.azure.storage.blob.PageBlobClient.create#long}
+     * <pre>
+     * PageBlobItem pageBlob = client.create(size);
+     * System.out.printf("Created page blob with sequence number %s%n", pageBlob.getBlobSequenceNumber());
+     * 
+ * * * @param size Specifies the maximum size for the page blob, up to 8 TB. The page blob size must be aligned to a * 512-byte boundary. @@ -151,7 +156,13 @@ public PageBlobItem create(long size) { * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.PageBlobClient.create#long-boolean}
+     * <pre>
+     * boolean overwrite = false; // Default value
+     * PageBlobItem pageBlob = client.create(size, overwrite);
+     * System.out.printf("Created page blob with sequence number %s%n", pageBlob.getBlobSequenceNumber());
+     * 
+ * * * @param size Specifies the maximum size for the page blob, up to 8 TB. The page blob size must be aligned to a * 512-byte boundary. @@ -177,7 +188,21 @@ public PageBlobItem create(long size, boolean overwrite) { * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobClient.createWithResponse#long-Long-BlobHttpHeaders-Map-BlobRequestConditions-Duration-Context}
+     * <pre>
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     * Context context = new Context(key, value);
+     *
+     * PageBlobItem pageBlob = client
+     *     .createWithResponse(size, sequenceNumber, headers, metadata, blobRequestConditions, timeout, context)
+     *     .getValue();
+     *
+     * System.out.printf("Created page blob with sequence number %s%n", pageBlob.getBlobSequenceNumber());
+     * 
+ * * * @param size Specifies the maximum size for the page blob, up to 8 TB. The page blob size must be aligned to a * 512-byte boundary. @@ -208,7 +233,24 @@ public Response createWithResponse(long size, Long sequenceNumber, * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobClient.createWithResponse#PageBlobCreateOptions-Duration-Context}
+     * <pre>
+     * BlobHttpHeaders headers = new BlobHttpHeaders()
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     * Context context = new Context(key, value);
+     *
+     * PageBlobItem pageBlob = client
+     *     .createWithResponse(new PageBlobCreateOptions(size).setSequenceNumber(sequenceNumber)
+     *             .setHeaders(headers).setMetadata(metadata).setTags(tags)
+     *             .setRequestConditions(blobRequestConditions), timeout,
+     *         context)
+     *     .getValue();
+     *
+     * System.out.printf("Created page blob with sequence number %s%n", pageBlob.getBlobSequenceNumber());
+     * 
+ * * * @param options {@link PageBlobCreateOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -230,7 +272,17 @@ public Response createWithResponse(PageBlobCreateOptions options, * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobClient.uploadPages#PageRange-InputStream}
+     * <pre>
+     * PageRange pageRange = new PageRange()
+     *     .setStart(0)
+     *     .setEnd(511);
+     * InputStream dataStream = new ByteArrayInputStream(data.getBytes(StandardCharsets.UTF_8));
+     *
+     * PageBlobItem pageBlob = client.uploadPages(pageRange, dataStream);
+     * System.out.printf("Uploaded page blob with sequence number %s%n", pageBlob.getBlobSequenceNumber());
+     * 
+ * * * @param pageRange A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start * offset must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges @@ -255,7 +307,22 @@ public PageBlobItem uploadPages(PageRange pageRange, InputStream body) { * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobClient.uploadPagesWithResponse#PageRange-InputStream-byte-PageBlobRequestConditions-Duration-Context}
+     * <pre>
+     * byte[] md5 = MessageDigest.getInstance("MD5").digest("data".getBytes(StandardCharsets.UTF_8));
+     * PageRange pageRange = new PageRange()
+     *     .setStart(0)
+     *     .setEnd(511);
+     * InputStream dataStream = new ByteArrayInputStream(data.getBytes(StandardCharsets.UTF_8));
+     * PageBlobRequestConditions pageBlobRequestConditions = new PageBlobRequestConditions().setLeaseId(leaseId);
+     * Context context = new Context(key, value);
+     *
+     * PageBlobItem pageBlob = client
+     *     .uploadPagesWithResponse(pageRange, dataStream, md5, pageBlobRequestConditions, timeout, context).getValue();
+     *
+     * System.out.printf("Uploaded page blob with sequence number %s%n", pageBlob.getBlobSequenceNumber());
+     * 
+ * * * @param pageRange A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start * offset must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges @@ -294,7 +361,17 @@ public Response uploadPagesWithResponse(PageRange pageRange, Input * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobClient.uploadPagesFromUrl#PageRange-String-Long}
+     * <pre>
+     * PageRange pageRange = new PageRange()
+     *     .setStart(0)
+     *     .setEnd(511);
+     *
+     * PageBlobItem pageBlob = client.uploadPagesFromUrl(pageRange, url, sourceOffset);
+     *
+     * System.out.printf("Uploaded page blob from URL with sequence number %s%n", pageBlob.getBlobSequenceNumber());
+     * 
+ * * * @param range A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start * offset must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges @@ -321,7 +398,25 @@ public PageBlobItem uploadPagesFromUrl(PageRange range, String sourceUrl, Long s * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobClient.uploadPagesFromUrlWithResponse#PageRange-String-Long-byte-PageBlobRequestConditions-BlobRequestConditions-Duration-Context}
+     * <pre>
+     * PageRange pageRange = new PageRange()
+     *     .setStart(0)
+     *     .setEnd(511);
+     * InputStream dataStream = new ByteArrayInputStream(data.getBytes(StandardCharsets.UTF_8));
+     * byte[] sourceContentMD5 = new byte[512];
+     * PageBlobRequestConditions pageBlobRequestConditions = new PageBlobRequestConditions().setLeaseId(leaseId);
+     * BlobRequestConditions sourceRequestConditions = new BlobRequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Context context = new Context(key, value);
+     *
+     * PageBlobItem pageBlob = client
+     *     .uploadPagesFromUrlWithResponse(pageRange, url, sourceOffset, sourceContentMD5, pageBlobRequestConditions,
+     *         sourceRequestConditions, timeout, context).getValue();
+     *
+     * System.out.printf("Uploaded page blob from URL with sequence number %s%n", pageBlob.getBlobSequenceNumber());
+     * 
+ * * * @param range The destination {@link PageRange} range. Given that pages must be aligned with 512-byte boundaries, * the start offset must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte @@ -359,7 +454,27 @@ public Response uploadPagesFromUrlWithResponse(PageRange range, St * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobClient.uploadPagesFromUrlWithResponse#PageBlobUploadPagesFromUrlOptions-Duration-Context}
+     * <pre>
+     * PageRange pageRange = new PageRange()
+     *     .setStart(0)
+     *     .setEnd(511);
+     * InputStream dataStream = new ByteArrayInputStream(data.getBytes(StandardCharsets.UTF_8));
+     * byte[] sourceContentMD5 = new byte[512];
+     * PageBlobRequestConditions pageBlobRequestConditions = new PageBlobRequestConditions().setLeaseId(leaseId);
+     * BlobRequestConditions sourceRequestConditions = new BlobRequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Context context = new Context(key, value);
+     *
+     * PageBlobItem pageBlob = client
+     *     .uploadPagesFromUrlWithResponse(new PageBlobUploadPagesFromUrlOptions(pageRange, url)
+     *         .setSourceOffset(sourceOffset).setSourceContentMd5(sourceContentMD5)
+     *         .setDestinationRequestConditions(pageBlobRequestConditions)
+     *         .setSourceRequestConditions(sourceRequestConditions), timeout, context).getValue();
+     *
+     * System.out.printf("Uploaded page blob from URL with sequence number %s%n", pageBlob.getBlobSequenceNumber());
+     * 
+ * * * @param options Parameters for the operation. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -381,7 +496,17 @@ public Response uploadPagesFromUrlWithResponse(PageBlobUploadPages * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobClient.clearPages#PageRange}
+     * <pre>
+     * PageRange pageRange = new PageRange()
+     *     .setStart(0)
+     *     .setEnd(511);
+     *
+     * PageBlobItem pageBlob = client.clearPages(pageRange);
+     *
+     * System.out.printf("Cleared page blob with sequence number %s%n", pageBlob.getBlobSequenceNumber());
+     * 
+ * * * @param pageRange A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start * offset must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges @@ -399,7 +524,20 @@ public PageBlobItem clearPages(PageRange pageRange) { * *

Code Samples

     *
-     * {@codesnippet com.azure.storage.blob.specialized.PageBlobClient.clearPagesWithResponse#PageRange-PageBlobRequestConditions-Duration-Context}
+     * <pre>
+     * PageRange pageRange = new PageRange()
+     *     .setStart(0)
+     *     .setEnd(511);
+     * PageBlobRequestConditions pageBlobRequestConditions = new PageBlobRequestConditions().setLeaseId(leaseId);
+     * Context context = new Context(key, value);
+     *
+     * PageBlobItem pageBlob = client
+     *     .clearPagesWithResponse(pageRange, pageBlobRequestConditions, timeout, context).getValue();
+     *
+     * System.out.printf("Cleared page blob with sequence number %s%n", pageBlob.getBlobSequenceNumber());
+     * 
+ * * * @param pageRange A {@link PageRange} object. Given that pages must be aligned with 512-byte boundaries, the start * offset must be a modulus of 512 and the end offset must be a modulus of 512 - 1. Examples of valid byte ranges @@ -424,7 +562,17 @@ public Response clearPagesWithResponse(PageRange pageRange, * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.PageBlobClient.getPageRanges#BlobRange} + * + *
+     * BlobRange blobRange = new BlobRange(offset);
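+     * // With only an offset supplied, the range covers from that offset to the end of the blob.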
+     * PageList pageList = client.getPageRanges(blobRange);
+     *
+     * System.out.println("Valid Page Ranges are:");
+     * for (PageRange pageRange : pageList.getPageRange()) {
+     *     System.out.printf("Start: %s, End: %s%n", pageRange.getStart(), pageRange.getEnd());
+     * }
+     * 
+ * * * @param blobRange {@link BlobRange} * @return The information of the cleared pages. @@ -440,7 +588,21 @@ public PageList getPageRanges(BlobRange blobRange) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.PageBlobClient.getPageRangesWithResponse#BlobRange-BlobRequestConditions-Duration-Context} + * + *
+     * BlobRange blobRange = new BlobRange(offset);
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     * Context context = new Context(key, value);
+     *
+     * PageList pageList = client
+     *     .getPageRangesWithResponse(blobRange, blobRequestConditions, timeout, context).getValue();
+     *
+     * System.out.println("Valid Page Ranges are:");
+     * for (PageRange pageRange : pageList.getPageRange()) {
+     *     System.out.printf("Start: %s, End: %s%n", pageRange.getStart(), pageRange.getEnd());
+     * }
+     * 
+ * * * @param blobRange {@link BlobRange} * @param requestConditions {@link BlobRequestConditions} @@ -462,7 +624,18 @@ public Response getPageRangesWithResponse(BlobRange blobRange, BlobReq * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.PageBlobClient.getPageRangesDiff#BlobRange-String} + * + *
+     * BlobRange blobRange = new BlobRange(offset);
+     * final String prevSnapshot = "previous snapshot";
+     * PageList pageList = client.getPageRangesDiff(blobRange, prevSnapshot);
+     *
+     * System.out.println("Valid Page Ranges are:");
+     * for (PageRange pageRange : pageList.getPageRange()) {
+     *     System.out.printf("Start: %s, End: %s%n", pageRange.getStart(), pageRange.getEnd());
+     * }
+     * 
+ * * * @param blobRange {@link BlobRange} * @param prevSnapshot Specifies that the response will contain only pages that were changed between target blob and @@ -483,7 +656,22 @@ public PageList getPageRangesDiff(BlobRange blobRange, String prevSnapshot) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.PageBlobClient.getPageRangesDiffWithResponse#BlobRange-String-BlobRequestConditions-Duration-Context} + * + *
+     * BlobRange blobRange = new BlobRange(offset);
+     * final String prevSnapshot = "previous snapshot";
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     * Context context = new Context(key, value);
+     *
+     * PageList pageList = client
+     *     .getPageRangesDiffWithResponse(blobRange, prevSnapshot, blobRequestConditions, timeout, context).getValue();
+     *
+     * System.out.println("Valid Page Ranges are:");
+     * for (PageRange pageRange : pageList.getPageRange()) {
+     *     System.out.printf("Start: %s, End: %s%n", pageRange.getStart(), pageRange.getEnd());
+     * }
+     * 
+ * * * @param blobRange {@link BlobRange} * @param prevSnapshot Specifies that the response will contain only pages that were changed between target blob and @@ -510,7 +698,18 @@ public Response getPageRangesDiffWithResponse(BlobRange blobRange, Str * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.PageBlobClient.getManagedDiskPageRangesDiff#BlobRange-String} + * + *
+     * BlobRange blobRange = new BlobRange(offset);
+     * final String prevSnapshotUrl = "previous snapshot url";
+     * PageList pageList = client.getManagedDiskPageRangesDiff(blobRange, prevSnapshotUrl);
+     *
+     * System.out.println("Valid Page Ranges are:");
+     * for (PageRange pageRange : pageList.getPageRange()) {
+     *     System.out.printf("Start: %s, End: %s%n", pageRange.getStart(), pageRange.getEnd());
+     * }
+     * 
+ * * * @param blobRange {@link BlobRange} * @param prevSnapshotUrl Specifies the URL of a previous snapshot of the target blob. Specifies that the @@ -533,7 +732,22 @@ public PageList getManagedDiskPageRangesDiff(BlobRange blobRange, String prevSna * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.PageBlobClient.getManagedDiskPageRangesDiffWithResponse#BlobRange-String-BlobRequestConditions-Duration-Context} + * + *
+     * BlobRange blobRange = new BlobRange(offset);
+     * final String prevSnapshotUrl = "previous snapshot url";
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     * Context context = new Context(key, value);
+     *
+     * PageList pageList = client
+     *     .getManagedDiskPageRangesDiffWithResponse(blobRange, prevSnapshotUrl, blobRequestConditions, timeout, context).getValue();
+     *
+     * System.out.println("Valid Page Ranges are:");
+     * for (PageRange pageRange : pageList.getPageRange()) {
+     *     System.out.printf("Start: %s, End: %s%n", pageRange.getStart(), pageRange.getEnd());
+     * }
+     * 
+ * * * @param blobRange {@link BlobRange} * @param prevSnapshotUrl Specifies the URL of a previous snapshot of the target blob. Specifies that the @@ -559,7 +773,12 @@ public Response getManagedDiskPageRangesDiffWithResponse(BlobRange blo * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.PageBlobClient.resize#long} + * + *
+     * PageBlobItem pageBlob = client.resize(size);
+     * System.out.printf("Page blob resized with sequence number %s%n", pageBlob.getBlobSequenceNumber());
+     * 
+ * * * @param size Resizes a page blob to the specified size. If the specified value is less than the current size of * the blob, then all pages above the specified value are cleared. @@ -576,7 +795,16 @@ public PageBlobItem resize(long size) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.PageBlobClient.resizeWithResponse#long-BlobRequestConditions-Duration-Context} + * + *
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     * Context context = new Context(key, value);
+     *
+     * PageBlobItem pageBlob = client
+     *     .resizeWithResponse(size, blobRequestConditions, timeout, context).getValue();
+     * System.out.printf("Page blob resized with sequence number %s%n", pageBlob.getBlobSequenceNumber());
+     * 
+ * * * @param size Resizes a page blob to the specified size. If the specified value is less than the current size of * the blob, then all pages above the specified value are cleared. @@ -599,7 +827,13 @@ public Response resizeWithResponse(long size, BlobRequestCondition * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.PageBlobClient.updateSequenceNumber#SequenceNumberActionType-Long} + * + *
+     * PageBlobItem pageBlob = client.updateSequenceNumber(SequenceNumberActionType.INCREMENT, size);
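+     * // INCREMENT bumps the current sequence number on the service side; the value passed here is only consulted for the MAX and UPDATE actions.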
+     *
+     * System.out.printf("Page blob updated to sequence number %s%n", pageBlob.getBlobSequenceNumber());
+     * 
+ * * * @param action Indicates how the service should modify the blob's sequence number. * @param sequenceNumber The blob's sequence number. The sequence number is a user-controlled property that you can @@ -618,7 +852,17 @@ public PageBlobItem updateSequenceNumber(SequenceNumberActionType action, * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.PageBlobClient.updateSequenceNumberWithResponse#SequenceNumberActionType-Long-BlobRequestConditions-Duration-Context} + * + *
+     * BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
+     * Context context = new Context(key, value);
+     *
+     * PageBlobItem pageBlob = client.updateSequenceNumberWithResponse(
+     *     SequenceNumberActionType.INCREMENT, size, blobRequestConditions, timeout, context).getValue();
+     *
+     * System.out.printf("Page blob updated to sequence number %s%n", pageBlob.getBlobSequenceNumber());
+     * 
+ * * * @param action Indicates how the service should modify the blob's sequence number. * @param sequenceNumber The blob's sequence number. The sequence number is a user-controlled property that you can @@ -646,7 +890,29 @@ public Response updateSequenceNumberWithResponse(SequenceNumberAct * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.PageBlobClient.copyIncremental#String-String} + * + *
+     * final String snapshot = "copy snapshot";
+     * CopyStatusType statusType = client.copyIncremental(url, snapshot);
+     *
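+     * // Incremental copy is asynchronous, so PENDING means the copy has started but has not yet finished.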
+     * switch (statusType) {
+     *     case SUCCESS:
+     *         System.out.println("Page blob copied successfully");
+     *         break;
+     *     case FAILED:
+     *         System.out.println("Page blob copied failed");
+     *         break;
+     *     case ABORTED:
+     *         System.out.println("Page blob copied aborted");
+     *         break;
+     *     case PENDING:
+     *         System.out.println("Page blob copied pending");
+     *         break;
+     *     default:
+     *         break;
+     * }
+     * 
+ * * * @param source The source page blob. * @param snapshot The snapshot on the copy source. @@ -668,7 +934,34 @@ public CopyStatusType copyIncremental(String source, String snapshot) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.PageBlobClient.copyIncrementalWithResponse#String-String-RequestConditions-Duration-Context} + * + *
+     * final String snapshot = "copy snapshot";
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfNoneMatch("snapshotMatch");
+     * Context context = new Context(key, value);
+     *
+     * CopyStatusType statusType = client
+     *     .copyIncrementalWithResponse(url, snapshot, modifiedRequestConditions, timeout, context).getValue();
+     *
+     * switch (statusType) {
+     *     case SUCCESS:
+     *         System.out.println("Page blob copied successfully");
+     *         break;
+     *     case FAILED:
+     *         System.out.println("Page blob copied failed");
+     *         break;
+     *     case ABORTED:
+     *         System.out.println("Page blob copied aborted");
+     *         break;
+     *     case PENDING:
+     *         System.out.println("Page blob copied pending");
+     *         break;
+     *     default:
+     *         break;
+     * }
+     * 
+ * * * @param source The source page blob. * @param snapshot The snapshot on the copy source. @@ -699,7 +992,35 @@ public Response copyIncrementalWithResponse(String source, Strin * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.PageBlobClient.copyIncrementalWithResponse#PageBlobCopyIncrementalOptions-Duration-Context} + * + *
+     * final String snapshot = "copy snapshot";
+     * PageBlobCopyIncrementalRequestConditions destinationRequestConditions = new PageBlobCopyIncrementalRequestConditions()
+     *     .setIfNoneMatch("snapshotMatch");
+     * Context context = new Context(key, value);
+     *
+     * CopyStatusType statusType = client
+     *     .copyIncrementalWithResponse(new PageBlobCopyIncrementalOptions(url, snapshot)
+     *         .setRequestConditions(destinationRequestConditions), timeout, context).getValue();
+     *
+     * switch (statusType) {
+     *     case SUCCESS:
+     *         System.out.println("Page blob copied successfully");
+     *         break;
+     *     case FAILED:
+     *         System.out.println("Page blob copied failed");
+     *         break;
+     *     case ABORTED:
+     *         System.out.println("Page blob copied aborted");
+     *         break;
+     *     case PENDING:
+     *         System.out.println("Page blob copied pending");
+     *         break;
+     *     default:
+     *         break;
+     * }
+     * 
+ * * * @param options {@link PageBlobCopyIncrementalOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. diff --git a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/SpecializedBlobClientBuilder.java b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/SpecializedBlobClientBuilder.java index 56fe12388de91..6816e95c1cd53 100644 --- a/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/SpecializedBlobClientBuilder.java +++ b/sdk/storage/azure-storage-blob/src/main/java/com/azure/storage/blob/specialized/SpecializedBlobClientBuilder.java @@ -469,7 +469,14 @@ public SpecializedBlobClientBuilder connectionString(String connectionString) { * *

Code Samples

* - * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.Builder.containerName#String} + * + *
+     * BlobClient client = new BlobClientBuilder()
+     *     .endpoint(endpoint)
+     *     .containerName(containerName)
+     *     .buildClient();
+     * 
+ * * * @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root}, * will be used. diff --git a/sdk/storage/azure-storage-blob/src/samples/java/com/azure/storage/blob/ReadmeSamples.java b/sdk/storage/azure-storage-blob/src/samples/java/com/azure/storage/blob/ReadmeSamples.java index e6a992fd8b808..3e7b825da6d69 100644 --- a/sdk/storage/azure-storage-blob/src/samples/java/com/azure/storage/blob/ReadmeSamples.java +++ b/sdk/storage/azure-storage-blob/src/samples/java/com/azure/storage/blob/ReadmeSamples.java @@ -6,6 +6,7 @@ import com.azure.core.http.netty.NettyAsyncHttpClientBuilder; import com.azure.core.util.BinaryData; import com.azure.core.util.Context; +import com.azure.core.util.HttpClientOptions; import com.azure.core.util.polling.SyncPoller; import com.azure.identity.DefaultAzureCredentialBuilder; import com.azure.storage.blob.models.BlobCopyInfo; @@ -31,80 +32,109 @@ import java.time.OffsetDateTime; /** - * WARNING: MODIFYING THIS FILE WILL REQUIRE CORRESPONDING UPDATES TO README.md FILE. LINE NUMBERS - * ARE USED TO EXTRACT APPROPRIATE CODE SEGMENTS FROM THIS FILE. ADD NEW CODE AT THE BOTTOM TO AVOID CHANGING - * LINE NUMBERS OF EXISTING CODE SAMPLES. + * WARNING: MODIFYING THIS FILE WILL REQUIRE CORRESPONDING UPDATES TO README.md FILE. LINE NUMBERS ARE USED TO EXTRACT + * APPROPRIATE CODE SEGMENTS FROM THIS FILE. ADD NEW CODE AT THE BOTTOM TO AVOID CHANGING LINE NUMBERS OF EXISTING CODE + * SAMPLES. * * Code samples for the README.md */ public class ReadmeSamples { - private BlobServiceClient blobServiceClient = new BlobServiceClientBuilder().buildClient(); - private BlobContainerClient blobContainerClient = new BlobContainerClientBuilder().buildClient(); - private BlobClient blobClient = new BlobClientBuilder().buildClient(); + private final BlobServiceClient blobServiceClient = new BlobServiceClientBuilder().buildClient(); + private final BlobContainerClient blobContainerClient = new BlobContainerClientBuilder().buildClient(); + private final BlobClient blobClient = new BlobClientBuilder().buildClient(); public void getBlobServiceClient1() { + // BEGIN: readme-sample-getBlobServiceClient1 BlobServiceClient blobServiceClient = new BlobServiceClientBuilder() .endpoint("") .sasToken("") .buildClient(); + // END: readme-sample-getBlobServiceClient1 } public void getBlobServiceClient2() { - // Only one "?" is needed here. If the sastoken starts with "?", please removing one "?". + // BEGIN: readme-sample-getBlobServiceClient2 + // Only one "?" is needed here. If the SAS token starts with "?", please removing one "?". BlobServiceClient blobServiceClient = new BlobServiceClientBuilder() .endpoint("" + "?" + "") .buildClient(); + // END: readme-sample-getBlobServiceClient2 } public void getBlobContainerClient1() { + // BEGIN: readme-sample-getBlobContainerClient1 BlobContainerClient blobContainerClient = blobServiceClient.getBlobContainerClient("mycontainer"); + // END: readme-sample-getBlobContainerClient1 } public void getBlobContainerClient2() { + // BEGIN: readme-sample-getBlobContainerClient2 BlobContainerClient blobContainerClient = new BlobContainerClientBuilder() .endpoint("") .sasToken("") .containerName("mycontainer") .buildClient(); + // END: readme-sample-getBlobContainerClient2 } public void getBlobContainerClient3() { - // Only one "?" is needed here. If the sastoken starts with "?", please removing one "?". + // BEGIN: readme-sample-getBlobContainerClient3 + // Only one "?" is needed here. 
If the SAS token starts with "?", please removing one "?". BlobContainerClient blobContainerClient = new BlobContainerClientBuilder() .endpoint("" + "/" + "mycontainer" + "?" + "") .buildClient(); + // END: readme-sample-getBlobContainerClient3 } public void getBlobClient1() { + // BEGIN: readme-sample-getBlobClient1 BlobClient blobClient = blobContainerClient.getBlobClient("myblob"); + // END: readme-sample-getBlobClient1 } public void getBlobClient2() { + // BEGIN: readme-sample-getBlobClient2 BlobClient blobClient = new BlobClientBuilder() .endpoint("") .sasToken("") .containerName("mycontainer") .blobName("myblob") .buildClient(); + // END: readme-sample-getBlobClient2 } public void getBlobClient3() { - // Only one "?" is needed here. If the sastoken starts with "?", please removing one "?". + // BEGIN: readme-sample-getBlobClient3 + // Only one "?" is needed here. If the SAS token starts with "?", please removing one "?". BlobClient blobClient = new BlobClientBuilder() .endpoint("" + "/" + "mycontainer" + "/" + "myblob" + "?" + "") .buildClient(); + // END: readme-sample-getBlobClient3 } public void createBlobContainerClient1() { + // BEGIN: readme-sample-createBlobContainerClient1 blobServiceClient.createBlobContainer("mycontainer"); + // END: readme-sample-createBlobContainerClient1 } public void createBlobContainerClient2() { + // BEGIN: readme-sample-createBlobContainerClient2 blobContainerClient.create(); + // END: readme-sample-createBlobContainerClient2 + } + + public void uploadBinaryDataToBlob() { + // BEGIN: readme-sample-uploadBinaryDataToBlob + BlobClient blobClient = blobContainerClient.getBlobClient("myblockblob"); + String dataSample = "samples"; + blobClient.upload(BinaryData.fromString(dataSample)); + // END: readme-sample-uploadBinaryDataToBlob } public void uploadBlobFromStream() { + // BEGIN: readme-sample-uploadBlobFromStream BlockBlobClient blockBlobClient = blobContainerClient.getBlobClient("myblockblob").getBlockBlobClient(); String dataSample = "samples"; try (ByteArrayInputStream dataStream = new ByteArrayInputStream(dataSample.getBytes())) { @@ -112,73 +142,21 @@ public void uploadBlobFromStream() { } catch (IOException e) { e.printStackTrace(); } + // END: readme-sample-uploadBlobFromStream } public void uploadBlobFromFile() { + // BEGIN: readme-sample-uploadBlobFromFile BlobClient blobClient = blobContainerClient.getBlobClient("myblockblob"); blobClient.uploadFromFile("local-file.jpg"); - } - - public void downloadBlobToStream() { - try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) { - blobClient.downloadStream(outputStream); - } catch (IOException e) { - e.printStackTrace(); - } - } - - public void downloadBlobToFile() { - blobClient.downloadToFile("downloaded-file.jpg"); - } - - public void enumerateBlobs() { - for (BlobItem blobItem : blobContainerClient.listBlobs()) { - System.out.println("This is the blob name: " + blobItem.getName()); - } - } - - public void authWithIdentity() { - BlobServiceClient blobStorageClient = new BlobServiceClientBuilder() - .endpoint("") - .credential(new DefaultAzureCredentialBuilder().build()) - .buildClient(); - } - - public void copyBlob() { - SyncPoller poller = blobClient.beginCopy("", Duration.ofSeconds(1)); - poller.waitForCompletion(); - } - - public void copyBlob2() { - blobClient.copyFromUrl("url-to-blob"); - } - - public void uploadBinaryDataToBlob() { - BlobClient blobClient = blobContainerClient.getBlobClient("myblockblob"); - String dataSample = "samples"; - 
blobClient.upload(BinaryData.fromString(dataSample)); - } - - public void downloadDataFromBlob() { - BinaryData content = blobClient.downloadContent(); - } - - public void enumerateBlobsCreateClient() { - for (BlobItem blobItem : blobContainerClient.listBlobs()) { - BlobClient blobClient; - if (blobItem.getSnapshot() != null) { - blobClient = blobContainerClient.getBlobClient(blobItem.getName(), blobItem.getSnapshot()); - } else { - blobClient = blobContainerClient.getBlobClient(blobItem.getName()); - } - System.out.println("This is the new blob uri: " + blobClient.getBlobUrl()); - } + // END: readme-sample-uploadBlobFromFile } public void uploadIfNotExists() { + // BEGIN: readme-sample-uploadIfNotExists /* - Rather than use an if block conditioned on an exists call, there are three ways to upload-if-not-exists using one - network call instead of two. Equivalent options are present on all upload methods. + * Rather than use an if block conditioned on an exists call, there are three ways to upload-if-not-exists using + * one network call instead of two. Equivalent options are present on all upload methods. */ // 1. The minimal upload method defaults to no overwriting String dataSample = "samples"; @@ -205,12 +183,14 @@ public void uploadIfNotExists() { } catch (IOException e) { e.printStackTrace(); } + // END: readme-sample-uploadIfNotExists } public void overwriteBlob() { + // BEGIN: readme-sample-overwriteBlob /* - Rather than use an if block conditioned on an exists call, there are three ways to upload-if-exists in one - network call instead of two. Equivalent options are present on all upload methods. + * Rather than use an if block conditioned on an exists call, there are three ways to upload-if-exists in one + * network call instead of two. Equivalent options are present on all upload methods. */ String dataSample = "samples"; @@ -222,8 +202,8 @@ public void overwriteBlob() { } /* - 2. If the max overload is needed and no access conditions are passed, the upload will succeed as both a - create and overwrite. + * 2. If the max overload is needed and no access conditions are passed, the upload will succeed as both a + * create and overwrite. */ try (ByteArrayInputStream dataStream = new ByteArrayInputStream(dataSample.getBytes())) { BlobParallelUploadOptions options = @@ -234,8 +214,8 @@ public void overwriteBlob() { } /* - 3. If the max overload is needed, access conditions may be used to assert that the upload is an overwrite and - not simply a create. + * 3. If the max overload is needed, access conditions may be used to assert that the upload is an overwrite and + * not simply a create. */ try (ByteArrayInputStream dataStream = new ByteArrayInputStream(dataSample.getBytes())) { BlobParallelUploadOptions options = @@ -246,44 +226,102 @@ public void overwriteBlob() { } catch (IOException e) { e.printStackTrace(); } + // END: readme-sample-overwriteBlob } - public void setProxy() { - ProxyOptions options = new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 888)); - BlobServiceClient client = new BlobServiceClientBuilder() - .httpClient(new NettyAsyncHttpClientBuilder().proxy(options).build()) - .buildClient(); + public void openBlobOutputStream() { + // BEGIN: readme-sample-openBlobOutputStream + /* + * Opening a blob input stream allows you to write to a blob through a normal stream interface. It will not be + * committed until the stream is closed. + * This option is convenient when the length of the data is unknown. + * This can only be done for block blobs. 
If the target blob already exists as another type of blob, it will + * fail. + */ + try (BlobOutputStream blobOS = blobClient.getBlockBlobClient().getBlobOutputStream()) { + blobOS.write(new byte[0]); + } catch (IOException e) { + e.printStackTrace(); + } + // END: readme-sample-openBlobOutputStream + } + + public void downloadDataFromBlob() { + // BEGIN: readme-sample-downloadDataFromBlob + BinaryData content = blobClient.downloadContent(); + // END: readme-sample-downloadDataFromBlob + } + + public void downloadBlobToStream() { + // BEGIN: readme-sample-downloadBlobToStream + try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) { + blobClient.downloadStream(outputStream); + } catch (IOException e) { + e.printStackTrace(); + } + // END: readme-sample-downloadBlobToStream + } + + public void downloadBlobToFile() { + // BEGIN: readme-sample-downloadBlobToFile + blobClient.downloadToFile("downloaded-file.jpg"); + // END: readme-sample-downloadBlobToFile } public void openBlobInputStream() { + // BEGIN: readme-sample-openBlobInputStream /* - Opening a blob input stream allows you to read from a blob through a normal stream interface. It is also - markable. + * Opening a blob input stream allows you to read from a blob through a normal stream interface. It is also + * mark-able. */ try (BlobInputStream blobIS = blobClient.openInputStream()) { blobIS.read(); } catch (IOException e) { e.printStackTrace(); } + // END: readme-sample-openBlobInputStream } - public void openBlobOutputStream() { - /* - Opening a blob input stream allows you to write to a blob through a normal stream interface. It will not be - committed until the stream is closed. - This option is convenient when the length of the data is unknown. - This can only be done for block blobs. If the target blob already exists as another type of blob, it will fail. - */ - try (BlobOutputStream blobOS = blobClient.getBlockBlobClient().getBlobOutputStream()) { - blobOS.write(new byte[0]); - } catch (IOException e) { - e.printStackTrace(); + public void enumerateBlobs() { + // BEGIN: readme-sample-enumerateBlobs + for (BlobItem blobItem : blobContainerClient.listBlobs()) { + System.out.println("This is the blob name: " + blobItem.getName()); + } + // END: readme-sample-enumerateBlobs + } + + public void enumerateBlobsCreateClient() { + // BEGIN: readme-sample-enumerateBlobsCreateClient + for (BlobItem blobItem : blobContainerClient.listBlobs()) { + BlobClient blobClient; + if (blobItem.getSnapshot() != null) { + blobClient = blobContainerClient.getBlobClient(blobItem.getName(), blobItem.getSnapshot()); + } else { + blobClient = blobContainerClient.getBlobClient(blobItem.getName()); + } + System.out.println("This is the new blob uri: " + blobClient.getBlobUrl()); } + // END: readme-sample-enumerateBlobsCreateClient + } + + public void copyBlob() { + // BEGIN: readme-sample-copyBlob + SyncPoller poller = blobClient.beginCopy("", Duration.ofSeconds(1)); + poller.waitForCompletion(); + // END: readme-sample-copyBlob + } + + public void copyBlob2() { + // BEGIN: readme-sample-copyBlob2 + blobClient.copyFromUrl("url-to-blob"); + // END: readme-sample-copyBlob2 } public void generateSas() { + // BEGIN: readme-sample-generateSas /* - Generate an account sas. Other samples in this file will demonstrate how to create a client with the sas token. + * Generate an account sas. Other samples in this file will demonstrate how to create a client with the sas + * token. */ // Configure the sas parameters. This is the minimal set. 
OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1); @@ -303,9 +341,38 @@ public void generateSas() { blobContainerClient.generateSas(serviceSasValues); // Generate a sas using a blob client - BlobSasPermission blobSasPermission = new BlobSasPermission().setReadPermission(true); + BlobSasPermission blobSasPermission = new BlobSasPermission().setReadPermission(true); serviceSasValues = new BlobServiceSasSignatureValues(expiryTime, blobSasPermission); blobClient.generateSas(serviceSasValues); + // END: readme-sample-generateSas + } + + public void authWithIdentity() { + // BEGIN: readme-sample-authWithIdentity + BlobServiceClient blobStorageClient = new BlobServiceClientBuilder() + .endpoint("") + .credential(new DefaultAzureCredentialBuilder().build()) + .buildClient(); + // END: readme-sample-authWithIdentity + } + + public void setProxy() { + // BEGIN: readme-sample-setProxy + ProxyOptions options = new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 888)); + BlobServiceClient client = new BlobServiceClientBuilder() + .httpClient(new NettyAsyncHttpClientBuilder().proxy(options).build()) + .buildClient(); + // END: readme-sample-setProxy + } + + public void setProxy2() { + // BEGIN: readme-sample-setProxy2 + HttpClientOptions clientOptions = new HttpClientOptions() + .setProxyOptions(new ProxyOptions(ProxyOptions.Type.HTTP, new InetSocketAddress("localhost", 888))); + BlobServiceClient client = new BlobServiceClientBuilder() + .clientOptions(clientOptions) + .buildClient(); + // END: readme-sample-setProxy2 } } diff --git a/sdk/storage/azure-storage-common/src/main/java/com/azure/storage/common/StorageSharedKeyCredential.java b/sdk/storage/azure-storage-common/src/main/java/com/azure/storage/common/StorageSharedKeyCredential.java index 08a1810a68e88..65b71e6d4ac51 100644 --- a/sdk/storage/azure-storage-common/src/main/java/com/azure/storage/common/StorageSharedKeyCredential.java +++ b/sdk/storage/azure-storage-common/src/main/java/com/azure/storage/common/StorageSharedKeyCredential.java @@ -60,7 +60,11 @@ public StorageSharedKeyCredential(String accountName, String accountKey) { * *

Code Samples

* - * {@codesnippet com.azure.storage.common.StorageSharedKeyCredential.fromConnectionString#String} + * + *
+     * StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(connectionString);
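+     * // The connection string is expected to contain AccountName and AccountKey entries.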
+     * 
+ * * * @param connectionString Connection string used to build the SharedKey credential. * @return a SharedKey credential if the connection string contains AccountName and AccountKey diff --git a/sdk/storage/azure-storage-common/src/main/java/com/azure/storage/common/implementation/credentials/SasTokenCredential.java b/sdk/storage/azure-storage-common/src/main/java/com/azure/storage/common/implementation/credentials/SasTokenCredential.java index 40f6f0b2bcaac..4d40006e0104f 100644 --- a/sdk/storage/azure-storage-common/src/main/java/com/azure/storage/common/implementation/credentials/SasTokenCredential.java +++ b/sdk/storage/azure-storage-common/src/main/java/com/azure/storage/common/implementation/credentials/SasTokenCredential.java @@ -56,10 +56,6 @@ public static SasTokenCredential fromSasTokenString(String sasToken) { * *

The entries in the passed map will be combined into a query string that is used as the SAS token.

* - *

Code Samples

- * - * {@codesnippet com.azure.storage.common.credentials.SasTokenCredential.fromQueryParameters#Map} - * * @param queryParameters URL query parameters * @return a SAS token credential if {@code queryParameters} is not {@code null} and has * the signature ("sig") query parameter, otherwise returns {@code null}. diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeDirectoryAsyncClient.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeDirectoryAsyncClient.java index 9ee6404a2f45b..aa5011f9fd708 100644 --- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeDirectoryAsyncClient.java +++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeDirectoryAsyncClient.java @@ -119,7 +119,12 @@ public String getDirectoryName() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.delete} + * + *
+     * client.delete().subscribe(response ->
+     *     System.out.println("Delete request completed"));
+     * 
+ * * *

For more information see the * Azure @@ -141,7 +146,16 @@ public Mono delete() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteWithResponse#boolean-DataLakeRequestConditions} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     * boolean recursive = false; // Default value
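+     * // Set recursive to true to also delete all paths beneath this directory.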
+     *
+     * client.deleteWithResponse(recursive, requestConditions)
+     *     .subscribe(response -> System.out.println("Delete request completed"));
+     * 
+ * * *

For more information see the * Azure @@ -169,7 +183,11 @@ public Mono> deleteWithResponse(boolean recursive, DataLakeReques * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.getFileAsyncClient#String} + * + *
+     * DataLakeFileAsyncClient dataLakeFileClient = client.getFileAsyncClient(fileName);
+     * 
+ * * * @param fileName A {@code String} representing the name of the file. * @return A new {@link DataLakeFileAsyncClient} object which references the file with the specified name in this @@ -195,7 +213,11 @@ public DataLakeFileAsyncClient getFileAsyncClient(String fileName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFile#String} + * + *
+     * DataLakeFileAsyncClient fileClient = client.createFile(fileName).block();
+     * 
+ * * * @param fileName Name of the file to create. * @return A {@link Mono} containing a {@link DataLakeFileAsyncClient} used to interact with the file created. @@ -212,7 +234,12 @@ public Mono createFile(String fileName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFile#String-boolean} + * + *
+     * boolean overwrite = false; // Default value
+     * DataLakeFileAsyncClient fClient = client.createFile(fileName, overwrite).block();
+     * 
+ * * * @param fileName Name of the file to create. * @param overwrite Whether or not to overwrite, should the file exist. @@ -240,7 +267,20 @@ public Mono createFile(String fileName, boolean overwri * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createFileWithResponse#String-String-String-PathHttpHeaders-Map-DataLakeRequestConditions} + * + *
+     * PathHttpHeaders httpHeaders = new PathHttpHeaders()
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     * String permissions = "permissions";
+     * String umask = "umask";
+     * DataLakeFileAsyncClient newFileClient = client.createFileWithResponse(fileName,
+     *     permissions, umask, httpHeaders, Collections.singletonMap("metadata", "value"), requestConditions
+     * ).block().getValue();
+     * 
+ * * * @param fileName Name of the file to create. * @param permissions POSIX access permissions for the file owner, the file owning group, and others. @@ -273,7 +313,12 @@ public Mono> createFileWithResponse(String fil * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteFile#String} + * + *
+     * client.deleteFile(fileName).subscribe(response ->
+     *     System.out.println("Delete request completed"));
+     * 
+ * * * @param fileName Name of the file to delete. * @return A reactive response signalling completion. @@ -294,7 +339,15 @@ public Mono deleteFile(String fileName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteFileWithResponse#String-DataLakeRequestConditions} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     *
+     * client.deleteFileWithResponse(fileName, requestConditions)
+     *     .subscribe(response -> System.out.println("Delete request completed"));
+     * 
+ * * * @param fileName Name of the file to delete. * @param requestConditions {@link DataLakeRequestConditions} @@ -316,7 +369,11 @@ public Mono> deleteFileWithResponse(String fileName, DataLakeRequ * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.getSubdirectoryAsyncClient#String} + * + *
+     * DataLakeDirectoryAsyncClient dataLakeDirectoryClient = client.getSubdirectoryAsyncClient(directoryName);
+     * 
+ * * * @param subdirectoryName A {@code String} representing the name of the sub-directory. * @return A new {@link DataLakeDirectoryAsyncClient} object which references the directory with the specified name @@ -342,7 +399,11 @@ public DataLakeDirectoryAsyncClient getSubdirectoryAsyncClient(String subdirecto * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectory#String} + * + *
+     * DataLakeDirectoryAsyncClient directoryClient = client.createSubdirectory(directoryName).block();
+     * 
+ * * * @param subdirectoryName Name of the sub-directory to create. * @return A {@link Mono} containing a {@link DataLakeDirectoryAsyncClient} used to interact with the directory @@ -359,7 +420,12 @@ public Mono createSubdirectory(String subdirectory * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectory#String-boolean} + * + *
+     * boolean overwrite = false; // Default value
+     * DataLakeDirectoryAsyncClient dClient = client.createSubdirectory(directoryName, overwrite).block();
+     * 
+ * * * @param subdirectoryName Name of the sub-directory to create. * @param overwrite Whether or not to overwrite, should the sub directory exist. @@ -387,7 +453,21 @@ public Mono createSubdirectory(String subdirectory * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.createSubdirectoryWithResponse#String-String-String-PathHttpHeaders-Map-DataLakeRequestConditions} + * + *
+     * PathHttpHeaders httpHeaders = new PathHttpHeaders()
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     * String permissions = "permissions";
+     * String umask = "umask";
+     * DataLakeDirectoryAsyncClient newDirectoryClient = client.createSubdirectoryWithResponse(
+     *     directoryName, permissions, umask, httpHeaders, Collections.singletonMap("metadata", "value"),
+     *     requestConditions
+     * ).block().getValue();
+     * 
+ * * * @param subdirectoryName Name of the sub-directory to create. * @param permissions POSIX access permissions for the sub-directory owner, the sub-directory owning group, and @@ -422,7 +502,12 @@ public Mono> createSubdirectoryWithRespon * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteSubdirectory#String} + * + *
+     * client.deleteSubdirectory(directoryName).subscribe(response ->
+     *     System.out.println("Delete request completed"));
+     * 
+ * * * @param subdirectoryName Name of the sub-directory to delete. * @return A reactive response signalling completion. @@ -444,7 +529,16 @@ public Mono deleteSubdirectory(String subdirectoryName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.deleteSubdirectoryWithResponse#String-boolean-DataLakeRequestConditions} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     * boolean recursive = false; // Default value
+     *
+     * client.deleteSubdirectoryWithResponse(directoryName, recursive, requestConditions)
+     *     .subscribe(response -> System.out.println("Delete request completed"));
+     * 
+ * * * @param directoryName Name of the sub-directory to delete. * @param recursive Whether or not to delete all paths beneath the sub-directory. @@ -469,7 +563,12 @@ public Mono> deleteSubdirectoryWithResponse(String directoryName, * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename#String-String} + * + *
+     * DataLakeDirectoryAsyncClient renamedClient = client.rename(fileSystemName, destinationPath).block();
+     * System.out.println("Directory Client has been renamed");
+     * 
+ * * * @param destinationFileSystem The file system of the destination within the account. * {@code null} for the current file system. @@ -495,7 +594,17 @@ public Mono rename(String destinationFileSystem, S * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.renameWithResponse#String-String-DataLakeRequestConditions-DataLakeRequestConditions} + * + *
+     * DataLakeRequestConditions sourceRequestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     * DataLakeRequestConditions destinationRequestConditions = new DataLakeRequestConditions();
+     *
+     * DataLakeDirectoryAsyncClient newRenamedClient = client.renameWithResponse(fileSystemName, destinationPath,
+     *     sourceRequestConditions, destinationRequestConditions).block().getValue();
+     * System.out.println("Directory Client has been renamed");
+     * 
+ * * * @param destinationFileSystem The file system of the destination within the account. * {@code null} for the current file system. @@ -526,7 +635,11 @@ public Mono> renameWithResponse(String de * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.listPaths} + * + *
+     * client.listPaths().subscribe(path -> System.out.printf("Name: %s%n", path.getName()));
+     * 
+ * * * @return A reactive response emitting the list of files/directories. */ @@ -541,7 +654,12 @@ public PagedFlux listPaths() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.listPaths#boolean-boolean-Integer} + * + *
+     * client.listPaths(false, false, 10)
+     *     .subscribe(path -> System.out.printf("Name: %s%n", path.getName()));
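+     * // The arguments correspond to the recursive, userPrincipleNameReturned, and max-results-per-page parameters described below.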
+     * 
+ * * * @param recursive Specifies if the call should recursively include all paths. * @param userPrincipleNameReturned If "true", the user identity values returned in the x-ms-owner, x-ms-group, diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeDirectoryClient.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeDirectoryClient.java index 8e42243a5fa4e..26271f4fa1f56 100644 --- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeDirectoryClient.java +++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeDirectoryClient.java @@ -89,7 +89,12 @@ public String getDirectoryName() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.delete} + * + *
+     * client.delete();
+     * System.out.println("Delete request completed");
+     * 
+ * * *

For more information see the * Azure @@ -105,7 +110,16 @@ public void delete() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.deleteWithResponse#boolean-DataLakeRequestConditions-Duration-Context} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     * boolean recursive = false; // Default value
+     *
+     * client.deleteWithResponse(recursive, requestConditions, timeout, new Context(key1, value1));
+     * System.out.println("Delete request completed");
+     * 
+ * * *

For more information see the * Azure @@ -136,7 +150,11 @@ public Response deleteWithResponse(boolean recursive, DataLakeRequestCondi * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.getFileClient#String} + * + *
+     * DataLakeFileClient dataLakeFileClient = client.getFileClient(fileName);
+     * 
+ * * * @return A new {@link DataLakeFileClient} object which references the file with the specified name in this * directory. @@ -155,7 +173,11 @@ public DataLakeFileClient getFileClient(String fileName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.createFile#String} + * + *
+     * DataLakeFileClient fileClient = client.createFile(fileName);
+     * 
+ * * * @param fileName Name of the file to create. * @return A {@link DataLakeFileClient} used to interact with the file created. @@ -171,7 +193,12 @@ public DataLakeFileClient createFile(String fileName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.createFile#String-boolean} + * + *
+     * boolean overwrite = false; // Default value
+     * DataLakeFileClient fClient = client.createFile(fileName, overwrite);
+     * 
+ * * * @param fileName Name of the file to create. * @param overwrite Whether or not to overwrite, should a file exist. @@ -193,7 +220,20 @@ public DataLakeFileClient createFile(String fileName, boolean overwrite) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.createFileWithResponse#String-String-String-PathHttpHeaders-Map-DataLakeRequestConditions-Duration-Context} + * + *
+     * PathHttpHeaders httpHeaders = new PathHttpHeaders()
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     * String permissions = "permissions";
+     * String umask = "umask";
+     * Response<DataLakeFileClient> newFileClient = client.createFileWithResponse(fileName, permissions, umask, httpHeaders,
+     *     Collections.singletonMap("metadata", "value"), requestConditions,
+     *     timeout, new Context(key1, value1));
+     * 
+ * * * @param fileName Name of the file to create. * @param permissions POSIX access permissions for the file owner, the file owning group, and others. @@ -225,7 +265,12 @@ public Response createFileWithResponse(String fileName, Stri * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.deleteFile#String} + * + *
+     * client.deleteFile(fileName);
+     * System.out.println("Delete request completed");
+     * 
+ * * * @param fileName Name of the file to delete. */ @@ -241,7 +286,15 @@ public void deleteFile(String fileName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.deleteFileWithResponse#String-DataLakeRequestConditions-Duration-Context} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     *
+     * client.deleteFileWithResponse(fileName, requestConditions, timeout, new Context(key1, value1));
+     * System.out.println("Delete request completed");
+     * 
+ * * * @param fileName Name of the file to delete. * @param requestConditions {@link DataLakeRequestConditions} @@ -264,7 +317,11 @@ public Response deleteFileWithResponse(String fileName, DataLakeRequestCon * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.getSubdirectoryClient#String} + * + *
+     * DataLakeDirectoryClient dataLakeDirectoryClient = client.getSubdirectoryClient(directoryName);
+     * 
+ * * * @return A new {@link DataLakeDirectoryClient} object which references the sub-directory with the specified name * in this directory @@ -283,7 +340,11 @@ public DataLakeDirectoryClient getSubdirectoryClient(String subdirectoryName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.createSubdirectory#String} + * + *
+     * DataLakeDirectoryClient directoryClient = client.createSubdirectory(directoryName);
+     * 
+ * * * @param subdirectoryName Name of the sub-directory to create. * @return A {@link DataLakeDirectoryClient} used to interact with the sub-directory created. @@ -299,7 +360,12 @@ public DataLakeDirectoryClient createSubdirectory(String subdirectoryName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.createSubdirectory#String-boolean} + * + *
+     * boolean overwrite = false; // Default value
+     * DataLakeDirectoryClient dClient = client.createSubdirectory(directoryName, overwrite);
+     * 
+ * * * @param subdirectoryName Name of the sub-directory to create. * @param overwrite Whether or not to overwrite, should the sub-directory exist. @@ -322,7 +388,20 @@ public DataLakeDirectoryClient createSubdirectory(String subdirectoryName, boole * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.createSubdirectoryWithResponse#String-String-String-PathHttpHeaders-Map-DataLakeRequestConditions-Duration-Context} + * + *
+     * PathHttpHeaders httpHeaders = new PathHttpHeaders()
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     * String permissions = "permissions";
+     * String umask = "umask";
+     * Response<DataLakeDirectoryClient> newDirectoryClient = client.createSubdirectoryWithResponse(directoryName,
+     *     permissions, umask, httpHeaders, Collections.singletonMap("metadata", "value"), requestConditions, timeout,
+     *     new Context(key1, value1));
+     * 
+ * * * @param subdirectoryName Name of the sub-directory to create. * @param permissions POSIX access permissions for the sub-directory owner, the sub-directory owning group, and @@ -356,7 +435,12 @@ public Response createSubdirectoryWithResponse(String s * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.deleteSubdirectory#String} + * + *
+     * client.deleteSubdirectory(directoryName);
+     * System.out.println("Delete request completed");
+     * 
+ * * * @param subdirectoryName Name of the sub-directory to delete. */ @@ -373,7 +457,17 @@ public void deleteSubdirectory(String subdirectoryName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.deleteSubdirectoryWithResponse#String-boolean-DataLakeRequestConditions-Duration-Context} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     * boolean recursive = false; // Default value
+     *
+     * client.deleteSubdirectoryWithResponse(directoryName, recursive, requestConditions, timeout,
+     *     new Context(key1, value1));
+     * System.out.println("Delete request completed");
+     * 
+ * * * @param subdirectoryName Name of the sub-directory to delete. * @param recursive Whether or not to delete all paths beneath the sub-directory. @@ -397,7 +491,12 @@ public Response deleteSubdirectoryWithResponse(String subdirectoryName, bo * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.rename#String-String} + * + *
+     * DataLakeDirectoryClient renamedClient = client.rename(fileSystemName, destinationPath);
+     * System.out.println("Directory Client has been renamed");
+     * 
+ * * * @param destinationFileSystem The file system of the destination within the account. * {@code null} for the current file system. @@ -418,7 +517,17 @@ public DataLakeDirectoryClient rename(String destinationFileSystem, String desti * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.renameWithResponse#String-String-DataLakeRequestConditions-DataLakeRequestConditions-Duration-Context} + * + *
+     * DataLakeRequestConditions sourceRequestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     * DataLakeRequestConditions destinationRequestConditions = new DataLakeRequestConditions();
+     *
+     * DataLakeDirectoryClient newRenamedClient = client.renameWithResponse(fileSystemName, destinationPath,
+     *     sourceRequestConditions, destinationRequestConditions, timeout, new Context(key1, value1)).getValue();
+     * System.out.println("Directory Client has been renamed");
+     * 
+ * * * @param destinationFileSystem The file system of the destination within the account. * {@code null} for the current file system. @@ -452,7 +561,11 @@ public Response renameWithResponse(String destinationFi * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.listPaths} + * + *
+     * client.listPaths().forEach(path -> System.out.printf("Name: %s%n", path.getName()));
+     * 
+ * * * @return The list of files/directories. */ @@ -468,7 +581,12 @@ public PagedIterable listPaths() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryClient.listPaths#boolean-boolean-Integer-Duration} + * + *
+     * client.listPaths(false, false, 10, timeout)
+     *     .forEach(path -> System.out.printf("Name: %s%n", path.getName()));
+     * 
+ * * * @param recursive Specifies if the call should recursively include all paths. * @param userPrincipleNameReturned If "true", the user identity values returned in the x-ms-owner, x-ms-group, diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeFileAsyncClient.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeFileAsyncClient.java index b029e56a08a5e..089cbce2dcaca 100644 --- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeFileAsyncClient.java +++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeFileAsyncClient.java @@ -153,7 +153,12 @@ public String getFileName() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.delete} + * + *
+     * client.delete().subscribe(response ->
+     *     System.out.println("Delete request completed"));
+     * 
+ * * *

For more information see the * Azure @@ -175,7 +180,15 @@ public Mono delete() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.deleteWithResponse#DataLakeRequestConditions} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     *
+     * client.deleteWithResponse(requestConditions)
+     *     .subscribe(response -> System.out.println("Delete request completed"));
+     * 
+ * * *

For more information see the * Azure @@ -201,7 +214,13 @@ public Mono> deleteWithResponse(DataLakeRequestConditions request * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.upload#Flux-ParallelTransferOptions} + * + *
+     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize);
+     * client.upload(data, parallelTransferOptions)
+     *     .doOnError(throwable -> System.err.printf("Failed to upload %s%n", throwable.getMessage()))
+     *     .subscribe(response -> System.out.println("Upload succeeded"));
+     * 
+ * * * @param data The data to write to the file. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected @@ -219,7 +238,14 @@ public Mono upload(Flux data, ParallelTransferOptions para * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.upload#Flux-ParallelTransferOptions-boolean} + * + *
+     * boolean overwrite = false; // Default behavior
+     * client.upload(data, new ParallelTransferOptions(), overwrite)
+     *     .doOnError(throwable -> System.err.printf("Failed to upload %s%n", throwable.getMessage()))
+     *     .subscribe(response -> System.out.println("Upload succeeded"));
+     * 
+ * * * @param data The data to write to the file. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected @@ -257,11 +283,46 @@ public Mono upload(Flux data, ParallelTransferOptions para * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.uploadWithResponse#Flux-ParallelTransferOptions-PathHttpHeaders-Map-DataLakeRequestConditions} + * + *
+     * PathHttpHeaders headers = new PathHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Long blockSize = 100L * 1024L * 1024L; // 100 MB;
+     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize);
+     *
+     * client.uploadWithResponse(data, parallelTransferOptions, headers, metadata, requestConditions)
+     *     .subscribe(response -> System.out.println("Uploaded file %n"));
+     * 
+ * * *

Using Progress Reporting

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.uploadWithResponse#Flux-ParallelTransferOptions-PathHttpHeaders-Map-DataLakeRequestConditions.ProgressReporter} + * + *
+     * PathHttpHeaders httpHeaders = new PathHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadataMap = Collections.singletonMap("metadata", "value");
+     * DataLakeRequestConditions conditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Long blockSize = 100L * 1024L * 1024L; // 100 MB
+     * ParallelTransferOptions pto = new ParallelTransferOptions()
+     *     .setBlockSizeLong(blockSize)
+     *     .setProgressReceiver(bytesTransferred -> System.out.printf("Upload progress: %s bytes sent", bytesTransferred));
+     *
+     * client.uploadWithResponse(data, pto, httpHeaders, metadataMap, conditions)
+     *     .subscribe(response -> System.out.println("Uploaded file %n"));
+     * 
+ * * * @param data The data to write to the file. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected @@ -293,11 +354,52 @@ public Mono> uploadWithResponse(Flux data, * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.uploadWithResponse#FileParallelUploadOptions} + * + *
+     * PathHttpHeaders headers = new PathHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Long blockSize = 100L * 1024L * 1024L; // 100 MB;
+     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize);
+     *
+     * client.uploadWithResponse(new FileParallelUploadOptions(data)
+     *     .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers)
+     *     .setMetadata(metadata).setRequestConditions(requestConditions)
+     *     .setPermissions("permissions").setUmask("umask"))
+     *     .subscribe(response -> System.out.println("Uploaded file %n"));
+     * 
+ * * *

Using Progress Reporting

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.uploadWithResponse#FileParallelUploadOptions.ProgressReporter} + * + *
+     * PathHttpHeaders httpHeaders = new PathHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadataMap = Collections.singletonMap("metadata", "value");
+     * DataLakeRequestConditions conditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Long blockSize = 100L * 1024L * 1024L; // 100 MB
+     * ParallelTransferOptions pto = new ParallelTransferOptions()
+     *     .setBlockSizeLong(blockSize)
+     *     .setProgressReceiver(bytesTransferred -> System.out.printf("Upload progress: %s bytes sent", bytesTransferred));
+     *
+     * client.uploadWithResponse(new FileParallelUploadOptions(data)
+     *     .setParallelTransferOptions(pto).setHeaders(httpHeaders)
+     *     .setMetadata(metadataMap).setRequestConditions(conditions)
+     *     .setPermissions("permissions").setUmask("umask"))
+     *     .subscribe(response -> System.out.println("Uploaded file %n"));
+     * 
+ * * * @param options {@link FileParallelUploadOptions} * @return A reactive response containing the information of the uploaded file. @@ -418,7 +520,13 @@ private Mono> uploadWithResponse(Flux data, long * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.uploadFromFile#String} + * + *
+     * client.uploadFromFile(filePath)
+     *     .doOnError(throwable -> System.err.printf("Failed to upload from file %s%n", throwable.getMessage()))
+     *     .subscribe(completion -> System.out.println("Upload from file succeeded"));
+     * 
+ * * * @param filePath Path to the upload file * @return An empty response @@ -438,7 +546,14 @@ public Mono uploadFromFile(String filePath) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.uploadFromFile#String-boolean} + * + *
+     * boolean overwrite = false; // Default behavior
+     * client.uploadFromFile(filePath, overwrite)
+     *     .doOnError(throwable -> System.err.printf("Failed to upload from file %s%n", throwable.getMessage()))
+     *     .subscribe(completion -> System.out.println("Upload from file succeeded"));
+     * 
+ * * * @param filePath Path to the upload file * @param overwrite Whether or not to overwrite, should the file already exist. @@ -477,7 +592,25 @@ public Mono uploadFromFile(String filePath, boolean overwrite) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.uploadFromFile#String-ParallelTransferOptions-PathHttpHeaders-Map-DataLakeRequestConditions} + * + *
+     * PathHttpHeaders headers = new PathHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Long blockSize = 100L * 1024L * 1024L; // 100 MB;
+     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize);
+     *
+     * client.uploadFromFile(filePath, parallelTransferOptions, headers, metadata, requestConditions)
+     *     .doOnError(throwable -> System.err.printf("Failed to upload from file %s%n", throwable.getMessage()))
+     *     .subscribe(completion -> System.out.println("Upload from file succeeded"));
+     * 
+ * * * @param filePath Path to the upload file * @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file. Number of parallel @@ -584,7 +717,14 @@ private List sliceFile(long fileSize, Long originalBlockSize, long bl * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.append#Flux-long-long} + * + *
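+     * // offset: the position in the file at which the data will be appended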
+     * client.append(data, offset, length)
+     *     .subscribe(
+     *         response -> System.out.println("Append data completed"),
+     *         error -> System.out.printf("Error when calling append data: %s", error));
+     * 
+ * * *

For more information, see the * Azure @@ -611,7 +751,16 @@ public Mono append(Flux data, long fileOffset, long length) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.appendWithResponse#Flux-long-long-byte-String} + * + *
+     * byte[] contentMd5 = new byte[0]; // Replace with valid md5
+     *
+     * client.appendWithResponse(data, offset, length, contentMd5, leaseId).subscribe(response ->
+     *     System.out.printf("Append data completed with status %d%n", response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure @@ -657,7 +806,12 @@ Mono> appendWithResponse(Flux data, long fileOffset, * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.flush#long} + * + *
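+     * // position: the length of the file after all appended data has been written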
+     * client.flush(position).subscribe(response ->
+     *     System.out.println("Flush data completed"));
+     * 
+ * * *

For more information, see the * Azure @@ -682,7 +836,13 @@ public Mono flush(long position) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.flush#long-boolean} + * + *
+     * boolean overwrite = true;
+     * client.flush(position, overwrite).subscribe(response ->
+     *     System.out.println("Flush data completed"));
+     * 
+ * * *

For more information, see the * Azure @@ -713,7 +873,24 @@ public Mono flush(long position, boolean overwrite) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.flushWithResponse#long-boolean-boolean-PathHttpHeaders-DataLakeRequestConditions} + * + *
+     * boolean retainUncommittedData = false;
+     * boolean close = false;
+     * PathHttpHeaders httpHeaders = new PathHttpHeaders()
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     *
+     * client.flushWithResponse(position, retainUncommittedData, close, httpHeaders,
+     *     requestConditions).subscribe(response ->
+     *     System.out.printf("Flush data completed with status %d%n", response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure @@ -764,7 +941,18 @@ Mono> flushWithResponse(long position, boolean retainUncommit * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.read} + * + *
+     * ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
+     * client.read().subscribe(piece -> {
+     *     try {
+     *         downloadData.write(piece.array());
+     *     } catch (IOException ex) {
+     *         throw new UncheckedIOException(ex);
+     *     }
+     * });
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -785,7 +973,23 @@ public Flux read() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.readWithResponse#FileRange-DownloadRetryOptions-DataLakeRequestConditions-boolean} + * + *
+     * FileRange range = new FileRange(1024, 2048L);
+     * DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
+     *
+     * client.readWithResponse(range, options, null, false).subscribe(response -> {
+     *     ByteArrayOutputStream readData = new ByteArrayOutputStream();
+     *     response.getValue().subscribe(piece -> {
+     *         try {
+     *             readData.write(piece.array());
+     *         } catch (IOException ex) {
+     *             throw new UncheckedIOException(ex);
+     *         }
+     *     });
+     * });
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -816,7 +1020,11 @@ public Mono readWithResponse(FileRange range, DownloadRet * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.readToFile#String} + * + *
+     * client.readToFile(file).subscribe(response -> System.out.println("Completed download to file"));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -837,7 +1045,12 @@ public Mono readToFile(String filePath) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.readToFile#String-boolean} + * + *
+     * boolean overwrite = false; // Default value
+     * client.readToFile(file, overwrite).subscribe(response -> System.out.println("Completed download to file"));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -870,7 +1083,17 @@ public Mono readToFile(String filePath, boolean overwrite) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.readToFileWithResponse#String-FileRange-ParallelTransferOptions-DownloadRetryOptions-DataLakeRequestConditions-boolean-Set} + * + *
+     * FileRange fileRange = new FileRange(1024, 2048L);
+     * DownloadRetryOptions downloadRetryOptions = new DownloadRetryOptions().setMaxRetryRequests(5);
+     * Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE_NEW,
+     *     StandardOpenOption.WRITE, StandardOpenOption.READ)); // Default options
+     *
+     * client.readToFileWithResponse(file, fileRange, null, downloadRetryOptions, null, false, openOptions)
+     *     .subscribe(response -> System.out.println("Completed download to file"));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -908,7 +1131,12 @@ public Mono> readToFileWithResponse(String filePath, Fi * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.rename#String-String} + * + *
+     * DataLakeFileAsyncClient renamedClient = client.rename(fileSystemName, destinationPath).block();
+     * System.out.println("Directory Client has been renamed");
+     * 
+ * * * @param destinationFileSystem The file system of the destination within the account. * {@code null} for the current file system. @@ -932,7 +1160,17 @@ public Mono rename(String destinationFileSystem, String * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.renameWithResponse#String-String-DataLakeRequestConditions-DataLakeRequestConditions} + * + *
+     * DataLakeRequestConditions sourceRequestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     * DataLakeRequestConditions destinationRequestConditions = new DataLakeRequestConditions();
+     *
+     * DataLakeFileAsyncClient newRenamedClient = client.renameWithResponse(fileSystemName, destinationPath,
+     *     sourceRequestConditions, destinationRequestConditions).block().getValue();
+     * System.out.println("Directory Client has been renamed");
+     * 
+ * * * @param destinationFileSystem The file system of the destination within the account. * {@code null} for the current file system. @@ -966,7 +1204,19 @@ public Mono> renameWithResponse(String destina * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.query#String} + * + *
+     * ByteArrayOutputStream queryData = new ByteArrayOutputStream();
+     * String expression = "SELECT * from BlobStorage";
+     * client.query(expression).subscribe(piece -> {
+     *     try {
+     *         queryData.write(piece.array());
+     *     } catch (IOException ex) {
+     *         throw new UncheckedIOException(ex);
+     *     }
+     * });
+     * 
+ * * * @param expression The query expression. * @return A reactive response containing the queried data. @@ -984,7 +1234,41 @@ public Flux query(String expression) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.queryWithResponse#FileQueryOptions} + * + *
+     * String expression = "SELECT * from BlobStorage";
+     * FileQueryJsonSerialization input = new FileQueryJsonSerialization()
+     *     .setRecordSeparator('\n');
+     * FileQueryDelimitedSerialization output = new FileQueryDelimitedSerialization()
+     *     .setEscapeChar('\0')
+     *     .setColumnSeparator(',')
+     *     .setRecordSeparator('\n')
+     *     .setFieldQuote('\'')
+     *     .setHeadersPresent(true);
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().setLeaseId(leaseId);
+     * Consumer<FileQueryError> errorConsumer = System.out::println;
+     * Consumer<FileQueryProgress> progressConsumer = progress -> System.out.println("total file bytes read: "
+     *     + progress.getBytesScanned());
+     * FileQueryOptions queryOptions = new FileQueryOptions(expression)
+     *     .setInputSerialization(input)
+     *     .setOutputSerialization(output)
+     *     .setRequestConditions(requestConditions)
+     *     .setErrorConsumer(errorConsumer)
+     *     .setProgressConsumer(progressConsumer);
+     *
+     * client.queryWithResponse(queryOptions)
+     *     .subscribe(response -> {
+     *         ByteArrayOutputStream queryData = new ByteArrayOutputStream();
+     *         response.getValue().subscribe(piece -> {
+     *             try {
+     *                 queryData.write(piece.array());
+     *             } catch (IOException ex) {
+     *                 throw new UncheckedIOException(ex);
+     *             }
+     *         });
+     *     });
+     * 
+ * * * @param queryOptions {@link FileQueryOptions The query options} * @return A reactive response containing the queried data. @@ -1001,7 +1285,14 @@ public Mono queryWithResponse(FileQueryOptions queryOpti * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.scheduleDeletion#FileScheduleDeletionOptions} + * + *
+     * FileScheduleDeletionOptions options = new FileScheduleDeletionOptions(OffsetDateTime.now().plusDays(1));
+     *
+     * client.scheduleDeletion(options)
+     *     .subscribe(r -> System.out.println("File deletion has been scheduled"));
+     * 
+ * * * @param options Schedule deletion parameters. * @return A reactive response signalling completion. @@ -1017,7 +1308,14 @@ public Mono scheduleDeletion(FileScheduleDeletionOptions options) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.scheduleDeletionWithResponse#FileScheduleDeletionOptions} + * + *
+     * FileScheduleDeletionOptions options = new FileScheduleDeletionOptions(OffsetDateTime.now().plusDays(1));
+     *
+     * client.scheduleDeletionWithResponse(options)
+     *     .subscribe(r -> System.out.println("File deletion has been scheduled"));
+     * 
+ * * * @param options Schedule deletion parameters. * @return A reactive response signalling completion. diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeFileClient.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeFileClient.java index 74ecfd1ba05cc..e746fec3b3f58 100644 --- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeFileClient.java +++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeFileClient.java @@ -130,7 +130,12 @@ public String getFileName() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.delete} + * + *
+     * client.delete();
+     * System.out.println("Delete request completed");
+     * 
+ * * *

For more information see the * Azure @@ -146,7 +151,15 @@ public void delete() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.deleteWithResponse#DataLakeRequestConditions-Duration-Context} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     *
+     * client.deleteWithResponse(requestConditions, timeout, new Context(key1, value1));
+     * System.out.println("Delete request completed");
+     * 
+ * * *

For more information see the * Azure @@ -172,7 +185,16 @@ public Response deleteWithResponse(DataLakeRequestConditions requestCondit * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.upload#InputStream-long} + * + *
+     * try {
+     *     client.upload(data, length);
+     *     System.out.println("Upload from file succeeded");
+     * } catch (UncheckedIOException ex) {
+     *     System.err.printf("Failed to upload from file %s%n", ex.getMessage());
+     * }
+     * 
+ * * * @param data The data to write to the blob. The data must be markable. This is in order to support retries. If * the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark @@ -191,7 +213,17 @@ public PathInfo upload(InputStream data, long length) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.upload#InputStream-long-boolean} + * + *
+     * try {
+     *     boolean overwrite = false;
+     *     client.upload(data, length, overwrite);
+     *     System.out.println("Upload from file succeeded");
+     * } catch (UncheckedIOException ex) {
+     *     System.err.printf("Failed to upload from file %s%n", ex.getMessage());
+     * }
+     * 
+ * * * @param data The data to write to the blob. The data must be markable. This is in order to support retries. If * the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark @@ -217,7 +249,31 @@ public PathInfo upload(InputStream data, long length, boolean overwrite) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.uploadWithResponse#FileParallelUploadOptions-Duration-Context} + * + *
+     * PathHttpHeaders headers = new PathHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Long blockSize = 100L * 1024L * 1024L; // 100 MB;
+     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize);
+     *
+     * try {
+     *     client.uploadWithResponse(new FileParallelUploadOptions(data, length)
+     *         .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers)
+     *         .setMetadata(metadata).setRequestConditions(requestConditions)
+     *         .setPermissions("permissions").setUmask("umask"), timeout, new Context("key", "value"));
+     *     System.out.println("Upload from file succeeded");
+     * } catch (UncheckedIOException ex) {
+     *     System.err.printf("Failed to upload from file %s%n", ex.getMessage());
+     * }
+     * 
+ * * * @param options {@link FileParallelUploadOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -244,7 +300,16 @@ public Response uploadWithResponse(FileParallelUploadOptions options, * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile#String} + * + *
+     * try {
+     *     client.uploadFromFile(filePath);
+     *     System.out.println("Upload from file succeeded");
+     * } catch (UncheckedIOException ex) {
+     *     System.err.printf("Failed to upload from file %s%n", ex.getMessage());
+     * }
+     * 
+ * * * @param filePath Path of the file to upload * @throws UncheckedIOException If an I/O error occurs @@ -259,7 +324,17 @@ public void uploadFromFile(String filePath) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile#String-boolean} + * + *
+     * try {
+     *     boolean overwrite = false;
+     *     client.uploadFromFile(filePath, overwrite);
+     *     System.out.println("Upload from file succeeded");
+     * } catch (UncheckedIOException ex) {
+     *     System.err.printf("Failed to upload from file %s%n", ex.getMessage());
+     * }
+     * 
+ * * * @param filePath Path of the file to upload * @param overwrite Whether or not to overwrite, should the file already exist @@ -287,7 +362,28 @@ && exists()) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.uploadFromFile#String-ParallelTransferOptions-PathHttpHeaders-Map-DataLakeRequestConditions-Duration} + * + *
+     * PathHttpHeaders headers = new PathHttpHeaders()
+     *     .setContentMd5("data".getBytes(StandardCharsets.UTF_8))
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Long blockSize = 100L * 1024L * 1024L; // 100 MB;
+     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions().setBlockSizeLong(blockSize);
+     *
+     * try {
+     *     client.uploadFromFile(filePath, parallelTransferOptions, headers, metadata, requestConditions, timeout);
+     *     System.out.println("Upload from file succeeded");
+     * } catch (UncheckedIOException ex) {
+     *     System.err.printf("Failed to upload from file %s%n", ex.getMessage());
+     * }
+     * 
+ * * * @param filePath Path of the file to upload * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. @@ -317,7 +413,12 @@ public void uploadFromFile(String filePath, ParallelTransferOptions parallelTran * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.append#InputStream-long-long} + * + *
+     * client.append(data, offset, length);
+     * System.out.println("Append data completed");
+     * 
+ * * *

For more information, see the * Azure @@ -337,7 +438,17 @@ public void append(InputStream data, long fileOffset, long length) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.appendWithResponse#InputStream-long-long-byte-String-Duration-Context} + * + *
+     * byte[] contentMd5 = new byte[0]; // Replace with valid md5
+     *
+     * Response<Void> response = client.appendWithResponse(data, offset, length, contentMd5, leaseId, timeout,
+     *     new Context(key1, value1));
+     * System.out.printf("Append data completed with status %d%n", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure @@ -379,7 +490,12 @@ public Response appendWithResponse(InputStream data, long fileOffset, long * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.flush#long} + * + *
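+     * // position: the length of the file after all appended data has been written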
+     * client.flush(position);
+     * System.out.println("Flush data completed");
+     * 
+ * * *

For more information, see the * Azure @@ -400,7 +516,13 @@ public PathInfo flush(long position) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.flush#long-boolean} + * + *
+     * boolean overwrite = true;
+     * client.flush(position, overwrite);
+     * System.out.println("Flush data completed");
+     * 
+ * * *

For more information, see the * Azure @@ -426,7 +548,24 @@ public PathInfo flush(long position, boolean overwrite) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.flushWithResponse#long-boolean-boolean-PathHttpHeaders-DataLakeRequestConditions-Duration-Context} + * + *
+     * boolean retainUncommittedData = false;
+     * boolean close = false;
+     * PathHttpHeaders httpHeaders = new PathHttpHeaders()
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     *
+     * Response<PathInfo> response = client.flushWithResponse(position, retainUncommittedData, close, httpHeaders,
+     *     requestConditions, timeout, new Context(key1, value1));
+     * System.out.printf("Flush data completed with status %d%n", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure @@ -456,7 +595,12 @@ public Response flushWithResponse(long position, boolean retainUncommi * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.read#OutputStream} + * + *
+     * client.read(new ByteArrayOutputStream());
+     * System.out.println("Download completed.");
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -474,7 +618,16 @@ public void read(OutputStream stream) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.readWithResponse#OutputStream-FileRange-DownloadRetryOptions-DataLakeRequestConditions-boolean-Duration-Context} + * + *
+     * FileRange range = new FileRange(1024, 2048L);
+     * DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
+     *
+     * System.out.printf("Download completed with status %d%n",
+     *     client.readWithResponse(new ByteArrayOutputStream(), range, options, null, false,
+     *         timeout, new Context(key2, value2)).getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -534,7 +687,12 @@ public DataLakeFileOpenInputStreamResult openInputStream(DataLakeFileInputStream * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.readToFile#String} + * + *
+     * client.readToFile(file);
+     * System.out.println("Completed download to file");
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -556,7 +714,13 @@ public PathProperties readToFile(String filePath) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.readToFile#String-boolean} + * + *
+     * boolean overwrite = false; // Default value
+     * client.readToFile(file, overwrite);
+     * System.out.println("Completed download to file");
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -590,7 +754,18 @@ public PathProperties readToFile(String filePath, boolean overwrite) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.readToFileWithResponse#String-FileRange-ParallelTransferOptions-DownloadRetryOptions-DataLakeRequestConditions-boolean-Set-Duration-Context} + * + *
+     * FileRange fileRange = new FileRange(1024, 2048L);
+     * DownloadRetryOptions downloadRetryOptions = new DownloadRetryOptions().setMaxRetryRequests(5);
+     * Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE_NEW,
+     *     StandardOpenOption.WRITE, StandardOpenOption.READ)); // Default options
+     *
+     * client.readToFileWithResponse(file, fileRange, new ParallelTransferOptions().setBlockSizeLong(4L * Constants.MB),
+     *     downloadRetryOptions, null, false, openOptions, timeout, new Context(key2, value2));
+     * System.out.println("Completed download to file");
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -633,7 +808,12 @@ public Response readToFileWithResponse(String filePath, FileRang * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeDirectoryAsyncClient.rename#String-String} + * + *
+     * DataLakeFileClient renamedClient = client.rename(fileSystemName, destinationPath);
+     * System.out.println("File Client has been renamed");
+     * 
+ * * * @param destinationFileSystem The file system of the destination within the account. * {@code null} for the current file system. @@ -654,7 +834,17 @@ public DataLakeFileClient rename(String destinationFileSystem, String destinatio * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.renameWithResponse#String-String-DataLakeRequestConditions-DataLakeRequestConditions-Duration-Context} + * + *
+     * DataLakeRequestConditions sourceRequestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     * DataLakeRequestConditions destinationRequestConditions = new DataLakeRequestConditions();
+     *
+     * DataLakeFileClient newRenamedClient = client.renameWithResponse(fileSystemName, destinationPath,
+     *     sourceRequestConditions, destinationRequestConditions, timeout, new Context(key1, value1)).getValue();
+     * System.out.println("Directory Client has been renamed");
+     * 
+ * * * @param destinationFileSystem The file system of the destination within the account. * {@code null} for the current file system. @@ -689,7 +879,13 @@ public Response renameWithResponse(String destinationFileSys * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream#String} + * + *
+     * String expression = "SELECT * from BlobStorage";
+     * InputStream inputStream = client.openQueryInputStream(expression);
+     * // Now you can read from the input stream like you would normally.
+     * 
+ * * * @param expression The query expression. * @return An InputStream object that represents the stream to use for reading the query response. @@ -706,7 +902,33 @@ public InputStream openQueryInputStream(String expression) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.openQueryInputStream#FileQueryOptions} + * + *
+     * String expression = "SELECT * from BlobStorage";
+     * FileQuerySerialization input = new FileQueryDelimitedSerialization()
+     *     .setColumnSeparator(',')
+     *     .setEscapeChar('\0')
+     *     .setRecordSeparator('\n')
+     *     .setHeadersPresent(true)
+     *     .setFieldQuote('"');
+     * FileQuerySerialization output = new FileQueryJsonSerialization()
+     *     .setRecordSeparator('\n');
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId("leaseId");
+     * Consumer<FileQueryError> errorConsumer = System.out::println;
+     * Consumer<FileQueryProgress> progressConsumer = progress -> System.out.println("total file bytes read: "
+     *     + progress.getBytesScanned());
+     * FileQueryOptions queryOptions = new FileQueryOptions(expression)
+     *     .setInputSerialization(input)
+     *     .setOutputSerialization(output)
+     *     .setRequestConditions(requestConditions)
+     *     .setErrorConsumer(errorConsumer)
+     *     .setProgressConsumer(progressConsumer);
+     *
+     * InputStream inputStream = client.openQueryInputStreamWithResponse(queryOptions).getValue();
+     * // Now you can read from the input stream like you would normally.
+     * 
+ * * * @param queryOptions {@link FileQueryOptions The query options}. * @return A response containing status code and HTTP headers including an InputStream object @@ -734,7 +956,14 @@ public Response openQueryInputStreamWithResponse(FileQueryOptions q * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.query#OutputStream-String} + * + *
+     * ByteArrayOutputStream queryData = new ByteArrayOutputStream();
+     * String expression = "SELECT * from BlobStorage";
+     * client.query(queryData, expression);
+     * System.out.println("Query completed.");
+     * 
+ * * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @param expression The query expression. @@ -753,7 +982,33 @@ public void query(OutputStream stream, String expression) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.queryWithResponse#FileQueryOptions-Duration-Context} + * + *
+     * ByteArrayOutputStream queryData = new ByteArrayOutputStream();
+     * String expression = "SELECT * from BlobStorage";
+     * FileQueryJsonSerialization input = new FileQueryJsonSerialization()
+     *     .setRecordSeparator('\n');
+     * FileQueryDelimitedSerialization output = new FileQueryDelimitedSerialization()
+     *     .setEscapeChar('\0')
+     *     .setColumnSeparator(',')
+     *     .setRecordSeparator('\n')
+     *     .setFieldQuote('\'')
+     *     .setHeadersPresent(true);
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().setLeaseId(leaseId);
+     * Consumer<FileQueryError> errorConsumer = System.out::println;
+     * Consumer<FileQueryProgress> progressConsumer = progress -> System.out.println("total file bytes read: "
+     *     + progress.getBytesScanned());
+     * FileQueryOptions queryOptions = new FileQueryOptions(expression, queryData)
+     *     .setInputSerialization(input)
+     *     .setOutputSerialization(output)
+     *     .setRequestConditions(requestConditions)
+     *     .setErrorConsumer(errorConsumer)
+     *     .setProgressConsumer(progressConsumer);
+     * System.out.printf("Query completed with status %d%n",
+     *     client.queryWithResponse(queryOptions, timeout, new Context(key1, value1))
+     *         .getStatusCode());
+     * 
+ * * * @param queryOptions {@link FileQueryOptions The query options}. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -776,7 +1031,13 @@ public FileQueryResponse queryWithResponse(FileQueryOptions queryOptions, Durati * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletion#FileScheduleDeletionOptions} + * + *
+     * FileScheduleDeletionOptions options = new FileScheduleDeletionOptions(OffsetDateTime.now().plusDays(1));
+     * client.scheduleDeletion(options);
+     * System.out.println("File deletion has been scheduled");
+     * 
+ * * * @param options Schedule deletion parameters. */ @@ -791,7 +1052,15 @@ public void scheduleDeletion(FileScheduleDeletionOptions options) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileClient.scheduleDeletionWithResponse#FileScheduleDeletionOptions-Duration-Context} + * + *
+     * FileScheduleDeletionOptions options = new FileScheduleDeletionOptions(OffsetDateTime.now().plusDays(1));
+     * Context context = new Context("key", "value");
+     *
+     * client.scheduleDeletionWithResponse(options, timeout, context);
+     * System.out.println("File deletion has been scheduled");
+     * 
+ * * * @param options Schedule deletion parameters. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeFileSystemAsyncClient.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeFileSystemAsyncClient.java index 9646bf1aae9af..10bbc331e5f00 100644 --- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeFileSystemAsyncClient.java +++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeFileSystemAsyncClient.java @@ -139,7 +139,11 @@ public class DataLakeFileSystemAsyncClient { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.getFileAsyncClient#String} + * + *
+     * DataLakeFileAsyncClient dataLakeFileAsyncClient = client.getFileAsyncClient(fileName);
+     * 
+ * * * @param fileName A {@code String} representing the name of the file. If the path name contains special characters, * pass in the url encoded version of the path name. @@ -163,7 +167,11 @@ public DataLakeFileAsyncClient getFileAsyncClient(String fileName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.getDirectoryAsyncClient#String} + * + *
+     * DataLakeDirectoryAsyncClient dataLakeDirectoryAsyncClient = client.getDirectoryAsyncClient(directoryName);
+     * 
+ * * * @param directoryName A {@code String} representing the name of the directory. If the path name contains special * characters, pass in the url encoded version of the path name. @@ -186,7 +194,11 @@ public DataLakeDirectoryAsyncClient getDirectoryAsyncClient(String directoryName * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.getRootDirectoryAsyncClient} + * + *
+     * DataLakeDirectoryAsyncClient dataLakeDirectoryAsyncClient = client.getRootDirectoryAsyncClient();
+     * 
+ * * * @return A new {@link DataLakeDirectoryAsyncClient} object which references the root directory * in this file system. @@ -218,7 +230,12 @@ public String getFileSystemUrl() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.getFileSystemName} + * + *
+     * String fileSystemName = client.getFileSystemName();
+     * System.out.println("The name of the file system is " + fileSystemName);
+     * 
+ * * * @return The name of file system. */ @@ -260,7 +277,13 @@ public HttpPipeline getHttpPipeline() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.create} + * + *
+     * client.create().subscribe(
+     *     response -> System.out.printf("Create completed%n"),
+     *     error -> System.out.printf("Error while creating file system %s%n", error));
+     * 
+ * * * @return A reactive response signalling completion. */ @@ -280,7 +303,13 @@ public Mono create() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.createWithResponse#Map-PublicAccessType} + * + *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * client.createWithResponse(metadata, PublicAccessType.CONTAINER).subscribe(response ->
+     *     System.out.printf("Create completed with status %d%n", response.getStatusCode()));
+     * 
+ * * * @param metadata Metadata to associate with the file system. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. @@ -305,7 +334,13 @@ public Mono> createWithResponse(Map metadata, Pub * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.delete} + * + *
+     * client.delete().subscribe(
+     *     response -> System.out.printf("Delete completed%n"),
+     *     error -> System.out.printf("Delete failed: %s%n", error));
+     * 
+ * * * @return A reactive response signalling completion. */ @@ -325,7 +360,16 @@ public Mono delete() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.deleteWithResponse#DataLakeRequestConditions} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.deleteWithResponse(requestConditions).subscribe(response ->
+     *     System.out.printf("Delete completed with status %d%n", response.getStatusCode()));
+     * 
+ * * * @param requestConditions {@link DataLakeRequestConditions} * @return A reactive response signalling completion. @@ -349,7 +393,15 @@ public Mono> deleteWithResponse(DataLakeRequestConditions request * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.getProperties} + * + *
+     * client.getProperties().subscribe(response ->
+     *     System.out.printf("Public Access Type: %s, Legal Hold? %b, Immutable? %b%n",
+     *         response.getDataLakePublicAccess(),
+     *         response.hasLegalHold(),
+     *         response.hasImmutabilityPolicy()));
+     * 
+ * * * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} containing the * file system properties. @@ -369,7 +421,16 @@ public Mono getProperties() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.getPropertiesWithResponse#String} + * + *
+     * client.getPropertiesWithResponse(leaseId).subscribe(response ->
+     *     System.out.printf("Public Access Type: %s, Legal Hold? %b, Immutable? %b%n",
+     *         response.getValue().getDataLakePublicAccess(),
+     *         response.getValue().hasLegalHold(),
+     *         response.getValue().hasImmutabilityPolicy()));
+     * 
+ * * * @param leaseId The lease ID the active lease on the file system must match. * @return A reactive response containing the file system properties. @@ -392,7 +453,14 @@ public Mono> getPropertiesWithResponse(String lea * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.setMetadata#Map} + * + *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * client.setMetadata(metadata).subscribe(
+     *     response -> System.out.printf("Set metadata completed%n"),
+     *     error -> System.out.printf("Set metadata failed: %s%n", error));
+     * 
+ * * * @param metadata Metadata to associate with the file system. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. @@ -413,7 +481,17 @@ public Mono setMetadata(Map metadata) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.setMetadataWithResponse#Map-DataLakeRequestConditions} + * + *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfModifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.setMetadataWithResponse(metadata, requestConditions).subscribe(response ->
+     *     System.out.printf("Set metadata completed with status %d%n", response.getStatusCode()));
+     * 
+ * * * @param metadata Metadata to associate with the file system. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. @@ -442,7 +520,11 @@ public Mono> setMetadataWithResponse(Map metadata * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.listPaths} + * + *
+     * client.listPaths().subscribe(path -> System.out.printf("Name: %s%n", path.getName()));
+     * 
+ * * * @return A reactive response emitting the list of files/directories. */ @@ -461,7 +543,15 @@ public PagedFlux listPaths() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.listPaths#ListPathsOptions} + * + *
+     * ListPathsOptions options = new ListPathsOptions()
+     *     .setPath("PathNamePrefixToMatch")
+     *     .setMaxResults(10);
+     *
+     * client.listPaths(options).subscribe(path -> System.out.printf("Name: %s%n", path.getName()));
+     * 
+ * * * @param options A {@link ListPathsOptions} which specifies what data should be returned by the service. * @return A reactive response emitting the list of files/directories. @@ -526,7 +616,11 @@ private Mono listPathsSegment(String marker, * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.listDeletedPaths} + * + *
+     * client.listDeletedPaths().subscribe(path -> System.out.printf("Name: %s%n", path.getPath()));
+     * 
+ * * * @return A reactive response emitting the list of files/directories. */ @@ -548,7 +642,16 @@ public PagedFlux listDeletedPaths() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.listDeletedPaths#String} + * + *
+     * int pageSize = 10;
+     * client.listDeletedPaths("PathNamePrefixToMatch")
+     *     .byPage(pageSize)
+     *     .subscribe(page ->
+     *         page.getValue().forEach(path ->
+     *             System.out.printf("Name: %s%n", path.getPath())));
+     * 
+ * * * @param prefix Specifies the path to filter the results to. * @return A reactive response emitting the list of files/directories. @@ -608,7 +711,11 @@ private Mono listDeletedPathsSegmen * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.createFile#String} + * + *
+     * Mono<DataLakeFileAsyncClient> fileClient = client.createFile(fileName);
+     * 
+ * * * @param fileName Name of the file to create. If the path name contains special characters, pass in the url encoded * version of the path name. @@ -625,7 +732,12 @@ public Mono createFile(String fileName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.createFile#String-boolean} + * + *
+     * boolean overwrite = false; /* Default value. */
+     * Mono<DataLakeFileAsyncClient> fClient = client.createFile(fileName, overwrite);
+     * 
+ * * * @param fileName Name of the file to create. If the path name contains special characters, pass in the url encoded * version of the path name. @@ -653,7 +765,19 @@ public Mono createFile(String fileName, boolean overwri * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.createFileWithResponse#String-String-String-PathHttpHeaders-Map-DataLakeRequestConditions} + * + *
+     * PathHttpHeaders httpHeaders = new PathHttpHeaders()
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     * String permissions = "permissions";
+     * String umask = "umask";
+     * Mono<Response<DataLakeFileAsyncClient>> newFileClient = client.createFileWithResponse(fileName, permissions,
+     *     umask, httpHeaders, Collections.singletonMap("metadata", "value"), requestConditions);
+     * 
+ * * * @param fileName Name of the file to create. If the path name contains special characters, pass in the url encoded * version of the path name. @@ -687,7 +811,12 @@ public Mono> createFileWithResponse(String fil * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.deleteFile#String} + * + *
+     * client.deleteFile(fileName).subscribe(response ->
+     *     System.out.println("Delete request completed"));
+     * 
+ * * * @param fileName Name of the file to delete. If the path name contains special characters, pass in the url encoded * version of the path name. @@ -709,7 +838,15 @@ public Mono deleteFile(String fileName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.deleteFileWithResponse#String-DataLakeRequestConditions} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     *
+     * client.deleteFileWithResponse(fileName, requestConditions)
+     *     .subscribe(response -> System.out.println("Delete request completed"));
+     * 
+ * * * @param fileName Name of the file to delete. If the path name contains special characters, pass in the url encoded * version of the path name. @@ -731,7 +868,11 @@ public Mono> deleteFileWithResponse(String fileName, DataLakeRequ * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.createDirectory#String} + * + *
+     * Mono<DataLakeDirectoryAsyncClient> directoryClient = client.createDirectory(directoryName);
+     * 
+ * * * @param directoryName Name of the directory to create. If the path name contains special characters, pass in the * url encoded version of the path name. @@ -749,7 +890,12 @@ public Mono createDirectory(String directoryName) * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.createDirectory#String-boolean} + * + *
+     * boolean overwrite = false; /* Default value. */
+     * Mono<DataLakeDirectoryAsyncClient> dClient = client.createDirectory(directoryName, overwrite);
+     * 
+ * * * @param directoryName Name of the directory to create. If the path name contains special characters, pass in the * url encoded version of the path name. @@ -778,7 +924,20 @@ public Mono createDirectory(String directoryName, * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.createDirectoryWithResponse#String-String-String-PathHttpHeaders-Map-DataLakeRequestConditions} + * + *
+     * PathHttpHeaders httpHeaders = new PathHttpHeaders()
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     * String permissions = "permissions";
+     * String umask = "umask";
+     * Mono<Response<DataLakeDirectoryAsyncClient>> newDirectoryClient = client.createDirectoryWithResponse(
+     *     directoryName, permissions, umask, httpHeaders, Collections.singletonMap("metadata", "value"),
+     *     requestConditions);
+     * 
+ * * * @param directoryName Name of the directory to create. If the path name contains special characters, pass in the * url encoded version of the path name. @@ -812,7 +971,12 @@ public Mono> createDirectoryWithResponse( * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.deleteDirectory#String} + * + *
+     * client.deleteDirectory(directoryName).subscribe(response ->
+     *     System.out.println("Delete request completed"));
+     * 
+ * * * @param directoryName Name of the directory to delete. If the path name contains special characters, pass in the * url encoded version of the path name. @@ -830,7 +994,16 @@ public Mono deleteDirectory(String directoryName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.deleteDirectoryWithResponse#String-boolean-DataLakeRequestConditions} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     * boolean recursive = false; // Default value
+     *
+     * client.deleteDirectoryWithResponse(directoryName, recursive, requestConditions)
+     *     .subscribe(response -> System.out.println("Delete request completed"));
+     * 
+ * * * @param directoryName Name of the directory to delete. If the path name contains special characters, pass in the * url encoded version of the path name. @@ -851,7 +1024,11 @@ public Mono> deleteDirectoryWithResponse(String directoryName, bo * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.undeletePath#String-String} + * + *
+     * client.undeletePath(deletedPath, deletionId).doOnSuccess(response -> System.out.println("Completed undelete"));
+     * 
+ * * * @param deletedPath The deleted path * @param deletionId deletion ID associated with the soft deleted path that uniquely identifies a resource if @@ -872,7 +1049,12 @@ public Mono undeletePath(String deletedPath, String del * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.undeletePathWithResponse#String-String} + * + *
+     * client.undeletePathWithResponse(deletedPath, deletionId)
+     *     .doOnSuccess(response -> System.out.println("Completed undelete"));
+     * 
+ * * * @param deletedPath The deleted path * @param deletionId deletion ID associated with the soft deleted path that uniquely identifies a resource if @@ -938,7 +1120,20 @@ Mono> undeletePathWithResponse(String deletedP * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.setAccessPolicy#PublicAccessType-List} + * + *
+     * DataLakeSignedIdentifier identifier = new DataLakeSignedIdentifier()
+     *     .setId("name")
+     *     .setAccessPolicy(new DataLakeAccessPolicy()
+     *         .setStartsOn(OffsetDateTime.now())
+     *         .setExpiresOn(OffsetDateTime.now().plusDays(7))
+     *         .setPermissions("permissionString"));
+     *
+     * client.setAccessPolicy(PublicAccessType.CONTAINER, Collections.singletonList(identifier)).subscribe(
+     *     response -> System.out.printf("Set access policy completed%n"),
+     *     error -> System.out.printf("Set access policy failed: %s%n", error));
+     * 
+ * * * @param accessType Specifies how the data in this file system is available to the public. See the * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. @@ -966,7 +1161,24 @@ public Mono setAccessPolicy(PublicAccessType accessType, ListCode Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.setAccessPolicyWithResponse#PublicAccessType-List-DataLakeRequestConditions} + * + *
+     * DataLakeSignedIdentifier identifier = new DataLakeSignedIdentifier()
+     *     .setId("name")
+     *     .setAccessPolicy(new DataLakeAccessPolicy()
+     *         .setStartsOn(OffsetDateTime.now())
+     *         .setExpiresOn(OffsetDateTime.now().plusDays(7))
+     *         .setPermissions("permissionString"));
+     *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.setAccessPolicyWithResponse(PublicAccessType.CONTAINER, Collections.singletonList(identifier), requestConditions)
+     *     .subscribe(response ->
+     *         System.out.printf("Set access policy completed with status %d%n", response.getStatusCode()));
+     * 
+ * * * @param accessType Specifies how the data in this file system is available to the public. See the * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. @@ -999,7 +1211,19 @@ public Mono> setAccessPolicyWithResponse(PublicAccessType accessT * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.getAccessPolicy} + * + *
+     * client.getAccessPolicy().subscribe(response -> {
+     *     System.out.printf("Data Lake Access Type: %s%n", response.getDataLakeAccessType());
+     *
+     *     for (DataLakeSignedIdentifier identifier : response.getIdentifiers()) {
+     *         System.out.printf("Identifier Name: %s, Permissions %s%n",
+     *             identifier.getId(),
+     *             identifier.getAccessPolicy().getPermissions());
+     *     }
+     * });
+     * 
+ * * * @return A reactive response containing the file system access policy. */ @@ -1019,7 +1243,19 @@ public Mono getAccessPolicy() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.getAccessPolicyWithResponse#String} + * + *
+     * client.getAccessPolicyWithResponse(leaseId).subscribe(response -> {
+     *     System.out.printf("Data Lake Access Type: %s%n", response.getValue().getDataLakeAccessType());
+     *
+     *     for (DataLakeSignedIdentifier identifier : response.getValue().getIdentifiers()) {
+     *         System.out.printf("Identifier Name: %s, Permissions %s%n",
+     *             identifier.getId(),
+     *             identifier.getAccessPolicy().getPermissions());
+     *     }
+     * });
+     * 
+ * * * @param leaseId The lease ID the active lease on the file system must match. * @return A reactive response containing the file system access policy. @@ -1041,7 +1277,8 @@ public Mono> getAccessPolicyWithResponse(Stri // * // *

Code Samples

// * -// * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.rename#String} +// * +// * // * // * @param destinationContainerName The new name of the file system. // * @return A {@link Mono} containing a {@link DataLakeFileSystemAsyncClient} used to interact with the renamed file system. @@ -1056,7 +1293,8 @@ public Mono> getAccessPolicyWithResponse(Stri // * // *

Code Samples

// * -// * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.renameWithResponse#FileSystemRenameOptions} +// * +// * // * // * @param options {@link FileSystemRenameOptions} // * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} contains a @@ -1122,7 +1360,17 @@ BlobContainerAsyncClient getBlobContainerAsyncClient() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.generateUserDelegationSas#DataLakeServiceSasSignatureValues-UserDelegationKey} + * + *
+     * OffsetDateTime myExpiryTime = OffsetDateTime.now().plusDays(1);
+     * FileSystemSasPermission myPermission = new FileSystemSasPermission().setReadPermission(true);
+     *
+     * DataLakeServiceSasSignatureValues myValues = new DataLakeServiceSasSignatureValues(myExpiryTime, myPermission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateUserDelegationSas(myValues, userDelegationKey);
+     * 
+ * * * @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues} * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. @@ -1145,7 +1393,17 @@ public String generateUserDelegationSas(DataLakeServiceSasSignatureValues dataLa * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.generateUserDelegationSas#DataLakeServiceSasSignatureValues-UserDelegationKey-String-Context} + * + *
+     * OffsetDateTime myExpiryTime = OffsetDateTime.now().plusDays(1);
+     * FileSystemSasPermission myPermission = new FileSystemSasPermission().setReadPermission(true);
+     *
+     * DataLakeServiceSasSignatureValues myValues = new DataLakeServiceSasSignatureValues(myExpiryTime, myPermission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateUserDelegationSas(myValues, userDelegationKey, accountName, new Context("key", "value"));
+     * 
+ * * * @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues} * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. @@ -1169,7 +1427,17 @@ public String generateUserDelegationSas(DataLakeServiceSasSignatureValues dataLa * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.generateSas#DataLakeServiceSasSignatureValues} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * FileSystemSasPermission permission = new FileSystemSasPermission().setReadPermission(true);
+     *
+     * DataLakeServiceSasSignatureValues values = new DataLakeServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateSas(values); // Client must be authenticated via StorageSharedKeyCredential
+     * 
+ * * * @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues} * @@ -1186,7 +1454,18 @@ public String generateSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSi * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.generateSas#DataLakeServiceSasSignatureValues-Context} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * FileSystemSasPermission permission = new FileSystemSasPermission().setReadPermission(true);
+     *
+     * DataLakeServiceSasSignatureValues values = new DataLakeServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * client.generateSas(values, new Context("key", "value"));
+     * 
+ * * * @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeFileSystemClient.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeFileSystemClient.java index 5088dbc50cba8..04fe3374efef7 100644 --- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeFileSystemClient.java +++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeFileSystemClient.java @@ -88,7 +88,11 @@ public class DataLakeFileSystemClient { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.getFileClient#String} + * + *
+     * DataLakeFileClient dataLakeFileClient = client.getFileClient(fileName);
+     * 
+ * * * @return A new {@link DataLakeFileClient} object which references the file with the specified name in this file * system. @@ -110,7 +114,11 @@ public DataLakeFileClient getFileClient(String fileName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.getDirectoryClient#String} + * + *
+     * DataLakeDirectoryClient dataLakeDirectoryClient = client.getDirectoryClient(directoryName);
+     * 
+ * * * @return A new {@link DataLakeDirectoryClient} object which references the directory with the specified name in * this file system. @@ -129,7 +137,11 @@ public DataLakeDirectoryClient getDirectoryClient(String directoryName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.getRootDirectoryClient} + * + *
+     * DataLakeDirectoryClient dataLakeDirectoryClient = client.getRootDirectoryClient();
+     * 
+ * * * @return A new {@link DataLakeDirectoryClient} object which references the root directory in this file system. */ @@ -142,7 +154,12 @@ DataLakeDirectoryClient getRootDirectoryClient() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.getFileSystemName} + * + *
+     * String fileSystemName = client.getFileSystemName();
+     * System.out.println("The name of the file system is " + fileSystemName);
+     * 
+ * * * @return The name of file system. */ @@ -205,7 +222,18 @@ public HttpPipeline getHttpPipeline() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.create} + * + *
+     * try {
+     *     client.create();
+     *     System.out.printf("Create completed%n");
+     * } catch (BlobStorageException error) {
+     *     if (error.getErrorCode().equals(BlobErrorCode.CONTAINER_ALREADY_EXISTS)) {
+     *         System.out.printf("Can't create file system. It already exists %n");
+     *     }
+     * }
+     * 
+ * */ @ServiceMethod(returns = ReturnType.SINGLE) public void create() { @@ -219,7 +247,15 @@ public void create() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.createWithResponse#Map-PublicAccessType-Duration-Context} + * + *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * Context context = new Context("Key", "Value");
+     *
+     * System.out.printf("Create completed with status %d%n",
+     *     client.createWithResponse(metadata, PublicAccessType.CONTAINER, timeout, context).getStatusCode());
+     * 
+ * * * @param metadata Metadata to associate with the file system. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. @@ -244,7 +280,18 @@ public Response createWithResponse(Map metadata, PublicAcc * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.delete} + * + *
+     * try {
+     *     client.delete();
+     *     System.out.printf("Delete completed%n");
+     * } catch (BlobStorageException error) {
+     *     if (error.getErrorCode().equals(BlobErrorCode.CONTAINER_NOT_FOUND)) {
+     *         System.out.printf("Delete failed. File System was not found %n");
+     *     }
+     * }
+     * 
+ * */ @ServiceMethod(returns = ReturnType.SINGLE) public void delete() { @@ -258,7 +305,17 @@ public void delete() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.deleteWithResponse#DataLakeRequestConditions-Duration-Context} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Context context = new Context("Key", "Value");
+     *
+     * System.out.printf("Delete completed with status %d%n", client.deleteWithResponse(
+     *     requestConditions, timeout, context).getStatusCode());
+     * 
+ * * * @param requestConditions {@link DataLakeRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -279,7 +336,15 @@ public Response deleteWithResponse(DataLakeRequestConditions requestCondit * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.getProperties} + * + *
+     * FileSystemProperties properties = client.getProperties();
+     * System.out.printf("Public Access Type: %s, Legal Hold? %b, Immutable? %b%n",
+     *     properties.getDataLakePublicAccess(),
+     *     properties.hasLegalHold(),
+     *     properties.hasImmutabilityPolicy());
+     * 
+ * * * @return The file system properties. */ @@ -294,7 +359,18 @@ public FileSystemProperties getProperties() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.getPropertiesWithResponse#String-Duration-Context} + * + *
+     * Context context = new Context("Key", "Value");
+     *
+     * FileSystemProperties properties = client.getPropertiesWithResponse(leaseId, timeout, context)
+     *     .getValue();
+     * System.out.printf("Public Access Type: %s, Legal Hold? %b, Immutable? %b%n",
+     *     properties.getDataLakePublicAccess(),
+     *     properties.hasLegalHold(),
+     *     properties.hasImmutabilityPolicy());
+     * 
+ * * * @param leaseId The lease ID the active lease on the file system must match. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -316,7 +392,17 @@ public Response getPropertiesWithResponse(String leaseId, * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.setMetadata#Map} + * + *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * try {
+     *     client.setMetadata(metadata);
+     *     System.out.printf("Set metadata completed with status %n");
+     * } catch (UnsupportedOperationException error) {
+     *     System.out.printf("Fail while setting metadata %n");
+     * }
+     * 
+ * * * @param metadata Metadata to associate with the file system. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. @@ -332,7 +418,18 @@ public void setMetadata(Map metadata) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.setMetadataWithResponse#Map-DataLakeRequestConditions-Duration-Context} + * + *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     * Context context = new Context("Key", "Value");
+     *
+     * System.out.printf("Set metadata completed with status %d%n",
+     *     client.setMetadataWithResponse(metadata, requestConditions, timeout, context).getStatusCode());
+     * 
+ * * @param metadata Metadata to associate with the file system. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. * @param requestConditions {@link DataLakeRequestConditions} @@ -355,7 +452,11 @@ public Response setMetadataWithResponse(Map metadata, * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.listPaths} + * + *
+     * client.listPaths().forEach(path -> System.out.printf("Name: %s%n", path.getName()));
+     * 
+ * * * @return The list of files/directories. */ @@ -371,7 +472,15 @@ public PagedIterable listPaths() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.listPaths#ListPathsOptions-Duration} + * + *
+     * ListPathsOptions options = new ListPathsOptions()
+     *     .setPath("pathPrefixToMatch")
+     *     .setMaxResults(10);
+     *
+     * client.listPaths(options, timeout).forEach(path -> System.out.printf("Name: %s%n", path.getName()));
+     * 
+ * * * @param options A {@link ListPathsOptions} which specifies what data should be returned by the service. If * iterating by page, the page size passed to byPage methods such as @@ -392,7 +501,11 @@ public PagedIterable listPaths(ListPathsOptions options, Duration time * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.listDeletedPaths} + * + *
+     * client.listDeletedPaths().forEach(path -> System.out.printf("Name: %s%n", path.getPath()));
+     * 
+ * * * @return The list of files/directories. */ @@ -409,7 +522,18 @@ public PagedIterable listDeletedPaths() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.listDeletedPaths#String-Duration-Context} + * + *
+     * Context context = new Context("Key", "Value");
+     * int pageSize = 10;
+     *
+     * client.listDeletedPaths("PathPrefixToMatch", timeout, context)
+     *     .iterableByPage(pageSize)
+     *     .forEach(page ->
+     *         page.getValue().forEach(path ->
+     *             System.out.printf("Name: %s%n", path.getPath())));
+     * 
+ * * * @param prefix Specifies the path to filter the results to. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -429,7 +553,11 @@ public PagedIterable listDeletedPaths(String prefix, Duration t * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.createFile#String} + * + *
+     * DataLakeFileClient fileClient = client.createFile(fileName);
+     * 
+ * * * @param fileName Name of the file to create. If the path name contains special characters, pass in the url encoded * version of the path name. @@ -446,7 +574,12 @@ public DataLakeFileClient createFile(String fileName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.createFile#String-boolean} + * + *
+     * boolean overwrite = false; /* Default value. */
+     * DataLakeFileClient fClient = client.createFile(fileName, overwrite);
+     * 
+ * * * @param fileName Name of the file to create. If the path name contains special characters, pass in the url encoded * version of the path name. @@ -469,7 +602,20 @@ public DataLakeFileClient createFile(String fileName, boolean overwrite) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.createFileWithResponse#String-String-String-PathHttpHeaders-Map-DataLakeRequestConditions-Duration-Context} + * + *
+     * PathHttpHeaders httpHeaders = new PathHttpHeaders()
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     * String permissions = "permissions";
+     * String umask = "umask";
+     * Response<DataLakeFileClient> newFileClient = client.createFileWithResponse(fileName, permissions, umask, httpHeaders,
+     *     Collections.singletonMap("metadata", "value"), requestConditions,
+     *     timeout, new Context(key1, value1));
+     * 
+ * * * @param fileName Name of the file to create. If the path name contains special characters, pass in the url encoded * version of the path name. @@ -502,7 +648,12 @@ public Response createFileWithResponse(String fileName, Stri * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.deleteFile#String} + * + *
+     * client.deleteFile(fileName);
+     * System.out.println("Delete request completed");
+     * 
+ * * * @param fileName Name of the file to delete. If the path name contains special characters, pass in the url encoded * version of the path name. @@ -519,7 +670,15 @@ public void deleteFile(String fileName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.deleteFileWithResponse#String-DataLakeRequestConditions-Duration-Context} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     *
+     * client.deleteFileWithResponse(fileName, requestConditions, timeout, new Context(key1, value1));
+     * System.out.println("Delete request completed");
+     * 
+ * * * @param fileName Name of the file to delete. If the path name contains special characters, pass in the url encoded * version of the path name. @@ -541,7 +700,11 @@ public Response deleteFileWithResponse(String fileName, DataLakeRequestCon * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.createDirectory#String} + * + *
+     * DataLakeDirectoryClient directoryClient = client.createDirectory(directoryName);
+     * 
+ * * * @param directoryName Name of the directory to create. If the path name contains special characters, pass in the * url encoded version of the path name. @@ -558,7 +721,12 @@ public DataLakeDirectoryClient createDirectory(String directoryName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.createDirectory#String-boolean} + * + *
+     * boolean overwrite = false; /* Default value. */
+     * DataLakeDirectoryClient dClient = client.createDirectory(directoryName, overwrite);
+     * 
+ * * * @param directoryName Name of the directory to create. If the path name contains special characters, pass in the * url encoded version of the path name. @@ -582,7 +750,20 @@ public DataLakeDirectoryClient createDirectory(String directoryName, boolean ove * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.createDirectoryWithResponse#String-String-String-PathHttpHeaders-Map-DataLakeRequestConditions-Duration-Context} + * + *
+     * PathHttpHeaders httpHeaders = new PathHttpHeaders()
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     * String permissions = "permissions";
+     * String umask = "umask";
+     * Response<DataLakeDirectoryClient> newDirectoryClient = client.createDirectoryWithResponse(directoryName,
+     *     permissions, umask, httpHeaders, Collections.singletonMap("metadata", "value"), requestConditions,
+     *     timeout, new Context(key1, value1));
+     * 
+ * * * @param directoryName Name of the directory to create. If the path name contains special characters, pass in the * url encoded version of the path name. @@ -615,7 +796,12 @@ public Response createDirectoryWithResponse(String dire * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.deleteDirectory#String} + * + *
+     * client.deleteDirectory(directoryName);
+     * System.out.println("Delete request completed");
+     * 
+ * * * @param directoryName Name of the directory to delete. If the path name contains special characters, pass in the * url encoded version of the path name. @@ -632,7 +818,17 @@ public void deleteDirectory(String directoryName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.deleteDirectoryWithResponse#String-boolean-DataLakeRequestConditions-Duration-Context} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     * boolean recursive = false; // Default value
+     *
+     * client.deleteDirectoryWithResponse(directoryName, recursive, requestConditions, timeout,
+     *     new Context(key1, value1));
+     * System.out.println("Delete request completed");
+     * 
+ * * * @param directoryName Name of the directory to delete. If the path name contains special characters, pass in the * url encoded version of the path name. @@ -655,7 +851,12 @@ public Response deleteDirectoryWithResponse(String directoryName, boolean * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.undeletePath#String-String} + * + *
+     * client.undeletePath(deletedPath, deletionId);
+     * System.out.println("Delete request completed");
+     * 
+ * * * @param deletedPath The deleted path * @param deletionId deletion ID associated with the soft deleted path that uniquely identifies a resource if @@ -676,7 +877,12 @@ public DataLakePathClient undeletePath(String deletedPath, String deletionId) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.undeletePathWithResponse#String-String-Duration-Context} + * + *
+     * client.undeletePathWithResponse(deletedPath, deletionId, timeout, new Context(key1, value1));
+     * System.out.println("Delete request completed");
+     * 
+ * * * @param deletedPath The deleted path * @param deletionId deletion ID associated with the soft deleted path that uniquely identifies a resource if @@ -718,7 +924,18 @@ public Response undeletePathWithResponse(String deletedPath, * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.getAccessPolicy} + * + *
+     * FileSystemAccessPolicies accessPolicies = client.getAccessPolicy();
+     * System.out.printf("Data Lake Access Type: %s%n", accessPolicies.getDataLakeAccessType());
+     *
+     * for (DataLakeSignedIdentifier identifier : accessPolicies.getIdentifiers()) {
+     *     System.out.printf("Identifier Name: %s, Permissions %s%n",
+     *         identifier.getId(),
+     *         identifier.getAccessPolicy().getPermissions());
+     * }
+     * 
+ * * * @return The file system access policy. */ @@ -734,7 +951,20 @@ public FileSystemAccessPolicies getAccessPolicy() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.getAccessPolicyWithResponse#String-Duration-Context} + * + *
+     * Context context = new Context("Key", "Value");
+     * FileSystemAccessPolicies accessPolicies = client.getAccessPolicyWithResponse(leaseId, timeout, context)
+     *     .getValue();
+     * System.out.printf("Data Lake Access Type: %s%n", accessPolicies.getDataLakeAccessType());
+     *
+     * for (DataLakeSignedIdentifier identifier : accessPolicies.getIdentifiers()) {
+     *     System.out.printf("Identifier Name: %s, Permissions %s%n",
+     *         identifier.getId(),
+     *         identifier.getAccessPolicy().getPermissions());
+     * }
+     * 
+ * * * @param leaseId The lease ID the active lease on the file system must match. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -759,7 +989,23 @@ public Response getAccessPolicyWithResponse(String lea * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.setAccessPolicy#PublicAccessType-List} + * + *
+     * DataLakeSignedIdentifier identifier = new DataLakeSignedIdentifier()
+     *     .setId("name")
+     *     .setAccessPolicy(new DataLakeAccessPolicy()
+     *         .setStartsOn(OffsetDateTime.now())
+     *         .setExpiresOn(OffsetDateTime.now().plusDays(7))
+     *         .setPermissions("permissionString"));
+     *
+     * try {
+     *     client.setAccessPolicy(PublicAccessType.CONTAINER, Collections.singletonList(identifier));
+     *     System.out.printf("Set Access Policy completed %n");
+     * } catch (UnsupportedOperationException error) {
+     *     System.out.printf("Set Access Policy completed %s%n", error);
+     * }
+     * 
+ * * * @param accessType Specifies how the data in this file system is available to the public. See the * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. @@ -782,7 +1028,29 @@ public void setAccessPolicy(PublicAccessType accessType, ListCode Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.setAccessPolicyWithResponse#PublicAccessType-List-DataLakeRequestConditions-Duration-Context} + * + *
+     * DataLakeSignedIdentifier identifier = new DataLakeSignedIdentifier()
+     *     .setId("name")
+     *     .setAccessPolicy(new DataLakeAccessPolicy()
+     *         .setStartsOn(OffsetDateTime.now())
+     *         .setExpiresOn(OffsetDateTime.now().plusDays(7))
+     *         .setPermissions("permissionString"));
+     *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId)
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * Context context = new Context("Key", "Value");
+     *
+     * System.out.printf("Set access policy completed with status %d%n",
+     *     client.setAccessPolicyWithResponse(PublicAccessType.CONTAINER,
+     *         Collections.singletonList(identifier),
+     *         requestConditions,
+     *         timeout,
+     *         context).getStatusCode());
+     * 
+ * * * @param accessType Specifies how the data in this file system is available to the public. See the * x-ms-blob-public-access header in the Azure Docs for more information. Pass null for no public access. @@ -812,7 +1080,8 @@ public Response setAccessPolicyWithResponse(PublicAccessType accessType, // * // *

Code Samples

// * -// * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.rename#String} +// * +// * // * // * @param destinationFileSystemName The new name of the file system. // * @return A {@link DataLakeFileSystemClient} used to interact with the renamed file system. @@ -827,7 +1096,8 @@ public Response setAccessPolicyWithResponse(PublicAccessType accessType, // * // *

Code Samples

// * -// * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.renameWithResponse#FileSystemRenameOptions-Duration-Context} +// * +// * // * // * @param options {@link FileSystemRenameOptions} // * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -863,7 +1133,17 @@ BlobContainerClient getBlobContainerClient() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.generateUserDelegationSas#DataLakeServiceSasSignatureValues-UserDelegationKey} + * + *
+     * OffsetDateTime myExpiryTime = OffsetDateTime.now().plusDays(1);
+     * FileSystemSasPermission myPermission = new FileSystemSasPermission().setReadPermission(true);
+     *
+     * DataLakeServiceSasSignatureValues myValues = new DataLakeServiceSasSignatureValues(myExpiryTime, myPermission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateUserDelegationSas(myValues, userDelegationKey);
+     * 
+ * * * @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues} * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. @@ -886,7 +1166,17 @@ public String generateUserDelegationSas(DataLakeServiceSasSignatureValues dataLa * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient.generateUserDelegationSas#DataLakeServiceSasSignatureValues-UserDelegationKey-String-Context} + * + *
+     * OffsetDateTime myExpiryTime = OffsetDateTime.now().plusDays(1);
+     * FileSystemSasPermission myPermission = new FileSystemSasPermission().setReadPermission(true);
+     *
+     * DataLakeServiceSasSignatureValues myValues = new DataLakeServiceSasSignatureValues(myExpiryTime, myPermission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateUserDelegationSas(myValues, userDelegationKey, accountName, new Context("key", "value"));
+     * 
+ * * * @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues} * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. @@ -910,7 +1200,17 @@ public String generateUserDelegationSas(DataLakeServiceSasSignatureValues dataLa * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.generateSas#DataLakeServiceSasSignatureValues} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * FileSystemSasPermission permission = new FileSystemSasPermission().setReadPermission(true);
+     *
+     * DataLakeServiceSasSignatureValues values = new DataLakeServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateSas(values); // Client must be authenticated via StorageSharedKeyCredential
+     * 
+ * * * @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues} * @@ -927,7 +1227,18 @@ public String generateSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSi * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClient.generateSas#DataLakeServiceSasSignatureValues-Context} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * FileSystemSasPermission permission = new FileSystemSasPermission().setReadPermission(true);
+     *
+     * DataLakeServiceSasSignatureValues values = new DataLakeServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * client.generateSas(values, new Context("key", "value"));
+     * 
+ * * * @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeFileSystemClientBuilder.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeFileSystemClientBuilder.java index 4bf6c648f68f7..dc8025c28c2e2 100644 --- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeFileSystemClientBuilder.java +++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeFileSystemClientBuilder.java @@ -85,7 +85,14 @@ public DataLakeFileSystemClientBuilder() { /** *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClientBuilder.buildClient} + * + *
+     * DataLakeFileSystemClient client = new DataLakeFileSystemClientBuilder()
+     *     .endpoint(endpoint)
+     *     .credential(storageSharedKeyCredential)
+     *     .buildClient();
+     * 
+ * * * @return a {@link DataLakeFileSystemClient} created from the configurations in this builder. * @throws IllegalStateException If multiple credentials have been specified. @@ -98,7 +105,14 @@ public DataLakeFileSystemClient buildClient() { /** *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeFileSystemClientBuilder.buildAsyncClient} + * + *
+     * DataLakeFileSystemAsyncClient client = new DataLakeFileSystemClientBuilder()
+     *     .endpoint(endpoint)
+     *     .credential(storageSharedKeyCredential)
+     *     .buildAsyncClient();
+     * 
+ * * * @return a {@link DataLakeFileSystemAsyncClient} created from the configurations in this builder. * @throws IllegalStateException If multiple credentials have been specified. diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakePathAsyncClient.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakePathAsyncClient.java index 8a1506af12f51..8ab078a347b64 100644 --- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakePathAsyncClient.java +++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakePathAsyncClient.java @@ -258,7 +258,12 @@ public DataLakeServiceVersion getServiceVersion() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.create} + * + *
+     * client.create().subscribe(response ->
+     *     System.out.printf("Last Modified Time:%s", response.getLastModified()));
+     * 
+ * * *

For more information see the * Azure @@ -280,7 +285,13 @@ public Mono create() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.create#boolean} + * + *
+     * boolean overwrite = true;
+     * client.create(overwrite).subscribe(response ->
+     *     System.out.printf("Last Modified Time:%s", response.getLastModified()));
+     * 
+ * * *

For more information see the * Azure @@ -308,7 +319,21 @@ public Mono create(boolean overwrite) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.createWithResponse#String-String-PathHttpHeaders-Map-DataLakeRequestConditions} + * + *
+     * PathHttpHeaders httpHeaders = new PathHttpHeaders()
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     * String permissions = "permissions";
+     * String umask = "umask";
+     *
+     * client.createWithResponse(permissions, umask, httpHeaders, Collections.singletonMap("metadata", "value"),
+     *     requestConditions)
+     *     .subscribe(response -> System.out.printf("Last Modified Time:%s", response.getValue().getLastModified()));
+     * 
+ * * *

For more information see the * Azure @@ -385,7 +410,12 @@ Mono> deleteWithResponse(Boolean recursive, DataLakeRequestCondit * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.setMetadata#Map} + * + *
+     * client.setMetadata(Collections.singletonMap("metadata", "value"))
+     *     .subscribe(response -> System.out.println("Set metadata completed"));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -409,7 +439,15 @@ public Mono setMetadata(Map metadata) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.setMetadata#Map-DataLakeRequestConditions} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().setLeaseId(leaseId);
+     *
+     * client.setMetadataWithResponse(Collections.singletonMap("metadata", "value"), requestConditions)
+     *     .subscribe(response -> System.out.printf("Set metadata completed with status %d%n",
+     *         response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -437,7 +475,13 @@ public Mono> setMetadataWithResponse(Map metadata * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.setHttpHeaders#PathHttpHeaders} + * + *
+     * client.setHttpHeaders(new PathHttpHeaders()
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary"));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -460,7 +504,16 @@ public Mono setHttpHeaders(PathHttpHeaders headers) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.setHttpHeadersWithResponse#PathHttpHeaders-DataLakeRequestConditions} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().setLeaseId(leaseId);
+     *
+     * client.setHttpHeadersWithResponse(new PathHttpHeaders()
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary"), requestConditions).subscribe(response ->
+     *     System.out.printf("Set HTTP headers completed with status %d%n", response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -486,7 +539,12 @@ public Mono> setHttpHeadersWithResponse(PathHttpHeaders headers, * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.getProperties} + * + *
+     * client.getProperties().subscribe(response ->
+     *     System.out.printf("Creation Time: %s, Size: %d%n", response.getCreationTime(), response.getFileSize()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -507,7 +565,15 @@ public Mono getProperties() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.getPropertiesWithResponse#DataLakeRequestConditions} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().setLeaseId(leaseId);
+     *
+     * client.getPropertiesWithResponse(requestConditions).subscribe(
+     *     response -> System.out.printf("Creation Time: %s, Size: %d%n", response.getValue().getCreationTime(),
+     *         response.getValue().getFileSize()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -534,7 +600,11 @@ public Mono> getPropertiesWithResponse(DataLakeRequestC * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.exists} + * + *
+     * client.exists().subscribe(response -> System.out.printf("Exists? %b%n", response));
+     * 
+ * * * @return true if the path exists, false if it doesn't */ @@ -555,7 +625,11 @@ public Mono exists() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.existsWithResponse} + * + *
+     * client.existsWithResponse().subscribe(response -> System.out.printf("Exists? %b%n", response.getValue()));
+     * 
+ * * * @return true if the path exists, false if it doesn't */ @@ -574,7 +648,20 @@ public Mono> existsWithResponse() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.setAccessControlList#List-String-String} + * + *
+     * PathAccessControlEntry pathAccessControlEntry = new PathAccessControlEntry()
+     *     .setEntityId("entityId")
+     *     .setPermissions(new RolePermissions().setReadPermission(true));
+     * List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>();
+     * pathAccessControlEntries.add(pathAccessControlEntry);
+     * String group = "group";
+     * String owner = "owner";
+     *
+     * client.setAccessControlList(pathAccessControlEntries, group, owner).subscribe(
+     *     response -> System.out.printf("Last Modified Time: %s", response.getLastModified()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -599,7 +686,21 @@ public Mono setAccessControlList(List accessCo * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.setAccessControlListWithResponse#List-String-String-DataLakeRequestConditions} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().setLeaseId(leaseId);
+     * PathAccessControlEntry pathAccessControlEntry = new PathAccessControlEntry()
+     *     .setEntityId("entityId")
+     *     .setPermissions(new RolePermissions().setReadPermission(true));
+     * List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>();
+     * pathAccessControlEntries.add(pathAccessControlEntry);
+     * String group = "group";
+     * String owner = "owner";
+     *
+     * client.setAccessControlListWithResponse(pathAccessControlEntries, group, owner, requestConditions).subscribe(
+     *     response -> System.out.printf("Last Modified Time: %s", response.getValue().getLastModified()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -626,7 +727,19 @@ public Mono> setAccessControlListWithResponse(ListCode Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.setPermissions#PathPermissions-String-String} + * + *
+     * PathPermissions permissions = new PathPermissions()
+     *     .setGroup(new RolePermissions().setExecutePermission(true).setReadPermission(true))
+     *     .setOwner(new RolePermissions().setExecutePermission(true).setReadPermission(true).setWritePermission(true))
+     *     .setOther(new RolePermissions().setReadPermission(true));
+     * String group = "group";
+     * String owner = "owner";
+     *
+     * client.setPermissions(permissions, group, owner).subscribe(
+     *     response -> System.out.printf("Last Modified Time: %s", response.getLastModified()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -650,7 +763,20 @@ public Mono setPermissions(PathPermissions permissions, String group, * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.setPermissionsWithResponse#PathPermissions-String-String-DataLakeRequestConditions} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().setLeaseId(leaseId);
+     * PathPermissions permissions = new PathPermissions()
+     *     .setGroup(new RolePermissions().setExecutePermission(true).setReadPermission(true))
+     *     .setOwner(new RolePermissions().setExecutePermission(true).setReadPermission(true).setWritePermission(true))
+     *     .setOther(new RolePermissions().setReadPermission(true));
+     * String group = "group";
+     * String owner = "owner";
+     *
+     * client.setPermissionsWithResponse(permissions, group, owner, requestConditions).subscribe(
+     *     response -> System.out.printf("Last Modified Time: %s", response.getValue().getLastModified()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -704,7 +830,19 @@ Mono> setAccessControlWithResponse(ListCode Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.setAccessControlRecursive#List} + * + *
+     * PathAccessControlEntry pathAccessControlEntry = new PathAccessControlEntry()
+     *     .setEntityId("entityId")
+     *     .setPermissions(new RolePermissions().setReadPermission(true));
+     * List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>();
+     * pathAccessControlEntries.add(pathAccessControlEntry);
+     *
+     * client.setAccessControlRecursive(pathAccessControlEntries).subscribe(
+     *     response -> System.out.printf("Successful changed file operations: %d",
+     *         response.getCounters().getChangedFilesCount()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -730,7 +868,34 @@ public Mono setAccessControlRecursive(ListCode Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.setAccessControlRecursiveWithResponse#PathSetAccessControlRecursiveOptions} + * + *
+     * PathAccessControlEntry pathAccessControlEntry = new PathAccessControlEntry()
+     *     .setEntityId("entityId")
+     *     .setPermissions(new RolePermissions().setReadPermission(true));
+     * List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>();
+     * pathAccessControlEntries.add(pathAccessControlEntry);
+     *
+     * Integer batchSize = 2;
+     * Integer maxBatches = 10;
+     * boolean continueOnFailure = false;
+     * String continuationToken = null;
+     * Consumer<Response<AccessControlChanges>> progressHandler =
+     *     response -> System.out.println("Received response");
+     *
+     * PathSetAccessControlRecursiveOptions options =
+     *     new PathSetAccessControlRecursiveOptions(pathAccessControlEntries)
+     *         .setBatchSize(batchSize)
+     *         .setMaxBatches(maxBatches)
+     *         .setContinueOnFailure(continueOnFailure)
+     *         .setContinuationToken(continuationToken)
+     *         .setProgressHandler(progressHandler);
+     *
+     * client.setAccessControlRecursive(pathAccessControlEntries).subscribe(
+     *     response -> System.out.printf("Successful changed file operations: %d",
+     *         response.getCounters().getChangedFilesCount()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -760,7 +925,19 @@ public Mono> setAccessControlRecursiveWithRe * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.updateAccessControlRecursive#List} + * + *
+     * PathAccessControlEntry pathAccessControlEntry = new PathAccessControlEntry()
+     *     .setEntityId("entityId")
+     *     .setPermissions(new RolePermissions().setReadPermission(true));
+     * List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>();
+     * pathAccessControlEntries.add(pathAccessControlEntry);
+     *
+     * client.updateAccessControlRecursive(pathAccessControlEntries).subscribe(
+     *     response -> System.out.printf("Successful changed file operations: %d",
+     *         response.getCounters().getChangedFilesCount()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -788,7 +965,34 @@ public Mono updateAccessControlRecursive( * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.updateAccessControlRecursiveWithResponse#PathUpdateAccessControlRecursiveOptions} + * + *
+     * PathAccessControlEntry pathAccessControlEntry = new PathAccessControlEntry()
+     *     .setEntityId("entityId")
+     *     .setPermissions(new RolePermissions().setReadPermission(true));
+     * List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>();
+     * pathAccessControlEntries.add(pathAccessControlEntry);
+     *
+     * Integer batchSize = 2;
+     * Integer maxBatches = 10;
+     * boolean continueOnFailure = false;
+     * String continuationToken = null;
+     * Consumer<Response<AccessControlChanges>> progressHandler =
+     *     response -> System.out.println("Received response");
+     *
+     * PathUpdateAccessControlRecursiveOptions options =
+     *     new PathUpdateAccessControlRecursiveOptions(pathAccessControlEntries)
+     *         .setBatchSize(batchSize)
+     *         .setMaxBatches(maxBatches)
+     *         .setContinueOnFailure(continueOnFailure)
+     *         .setContinuationToken(continuationToken)
+     *         .setProgressHandler(progressHandler);
+     *
+     * client.updateAccessControlRecursiveWithResponse(options).subscribe(
+     *     response -> System.out.printf("Successful changed file operations: %d",
+     *         response.getValue().getCounters().getChangedFilesCount()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -818,7 +1022,18 @@ public Mono> updateAccessControlRecursiveWit * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.removeAccessControlRecursive#List} + * + *
+     * PathRemoveAccessControlEntry pathAccessControlEntry = new PathRemoveAccessControlEntry()
+     *     .setEntityId("entityId");
+     * List<PathRemoveAccessControlEntry> pathAccessControlEntries = new ArrayList<>();
+     * pathAccessControlEntries.add(pathAccessControlEntry);
+     *
+     * client.removeAccessControlRecursive(pathAccessControlEntries).subscribe(
+     *     response -> System.out.printf("Successful changed file operations: %d",
+     *         response.getCounters().getChangedFilesCount()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -846,7 +1061,33 @@ public Mono removeAccessControlRecursive( * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.removeAccessControlRecursiveWithResponse#PathRemoveAccessControlRecursiveOptions} + * + *
+     * PathRemoveAccessControlEntry pathAccessControlEntry = new PathRemoveAccessControlEntry()
+     *     .setEntityId("entityId");
+     * List<PathRemoveAccessControlEntry> pathAccessControlEntries = new ArrayList<>();
+     * pathAccessControlEntries.add(pathAccessControlEntry);
+     *
+     * Integer batchSize = 2;
+     * Integer maxBatches = 10;
+     * boolean continueOnFailure = false;
+     * String continuationToken = null;
+     * Consumer<Response<AccessControlChanges>> progressHandler =
+     *     response -> System.out.println("Received response");
+     *
+     * PathRemoveAccessControlRecursiveOptions options =
+     *     new PathRemoveAccessControlRecursiveOptions(pathAccessControlEntries)
+     *         .setBatchSize(batchSize)
+     *         .setMaxBatches(maxBatches)
+     *         .setContinueOnFailure(continueOnFailure)
+     *         .setContinuationToken(continuationToken)
+     *         .setProgressHandler(progressHandler);
+     *
+     * client.removeAccessControlRecursiveWithResponse(options).subscribe(
+     *     response -> System.out.printf("Successful changed file operations: %d",
+     *         response.getValue().getCounters().getChangedFilesCount()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1018,7 +1259,14 @@ Determine if we are finished either because there is no new continuation (failur * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.getAccessControl} + * + *
+     * client.getAccessControl().subscribe(
+     *     response -> System.out.printf("Access Control List: %s, Group: %s, Owner: %s, Permissions: %s",
+     *         PathAccessControlEntry.serializeList(response.getAccessControlList()), response.getGroup(),
+     *         response.getOwner(), response.getPermissions()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1039,7 +1287,17 @@ public Mono getAccessControl() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.getAccessControlWithResponse#boolean-DataLakeRequestConditions} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().setLeaseId(leaseId);
+     * boolean userPrincipalNameReturned = false;
+     *
+     * client.getAccessControlWithResponse(userPrincipalNameReturned, requestConditions).subscribe(
+     *     response -> System.out.printf("Access Control List: %s, Group: %s, Owner: %s, Permissions: %s",
+     *         PathAccessControlEntry.serializeList(response.getValue().getAccessControlList()),
+     *         response.getValue().getGroup(), response.getValue().getOwner(), response.getValue().getPermissions()));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -1180,7 +1438,17 @@ BlockBlobAsyncClient getBlockBlobAsyncClient() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.generateUserDelegationSas#DataLakeServiceSasSignatureValues-UserDelegationKey} + * + *
+     * OffsetDateTime myExpiryTime = OffsetDateTime.now().plusDays(1);
+     * PathSasPermission myPermission = new PathSasPermission().setReadPermission(true);
+     *
+     * DataLakeServiceSasSignatureValues myValues = new DataLakeServiceSasSignatureValues(myExpiryTime, myPermission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateUserDelegationSas(myValues, userDelegationKey);
+     * 
+ * * * @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues} * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. @@ -1202,7 +1470,17 @@ public String generateUserDelegationSas(DataLakeServiceSasSignatureValues dataLa * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.generateUserDelegationSas#DataLakeServiceSasSignatureValues-UserDelegationKey-String-Context} + * + *
+     * OffsetDateTime myExpiryTime = OffsetDateTime.now().plusDays(1);
+     * PathSasPermission myPermission = new PathSasPermission().setReadPermission(true);
+     *
+     * DataLakeServiceSasSignatureValues myValues = new DataLakeServiceSasSignatureValues(myExpiryTime, myPermission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateUserDelegationSas(myValues, userDelegationKey, accountName, new Context("key", "value"));
+     * 
+ * * * @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues} * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. @@ -1227,7 +1505,17 @@ public String generateUserDelegationSas(DataLakeServiceSasSignatureValues dataLa * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.generateSas#DataLakeServiceSasSignatureValues} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * PathSasPermission permission = new PathSasPermission().setReadPermission(true);
+     *
+     * DataLakeServiceSasSignatureValues values = new DataLakeServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateSas(values); // Client must be authenticated via StorageSharedKeyCredential
+     * 
+ * * * @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues} * @@ -1244,7 +1532,18 @@ public String generateSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSi * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathAsyncClient.generateSas#DataLakeServiceSasSignatureValues-Context} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * PathSasPermission permission = new PathSasPermission().setReadPermission(true);
+     *
+     * DataLakeServiceSasSignatureValues values = new DataLakeServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * client.generateSas(values, new Context("key", "value"));
+     * 
+ * * * @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakePathClient.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakePathClient.java index 3a0bbdb4f9f15..0341dd5bafb70 100644 --- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakePathClient.java +++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakePathClient.java @@ -138,7 +138,11 @@ public DataLakeServiceVersion getServiceVersion() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.create} + * + *
+     * System.out.printf("Last Modified Time:%s", client.create().getLastModified());
+     * 
+ * * *

For more information see the * Azure @@ -156,7 +160,12 @@ public PathInfo create() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.create#boolean} + * + *
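+     * // Passing true overwrites an existing path; passing false fails if the path already exists.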
+     * boolean overwrite = true;
+     * System.out.printf("Last Modified Time:%s", client.create(true).getLastModified());
+     * 
+ * * *

For more information see the * Azure @@ -180,7 +189,22 @@ public PathInfo create(boolean overwrite) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.createWithResponse#String-String-PathHttpHeaders-Map-DataLakeRequestConditions-Duration-Context} + * + *
+     * PathHttpHeaders httpHeaders = new PathHttpHeaders()
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary");
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions()
+     *     .setLeaseId(leaseId);
+     * String permissions = "permissions";
+     * String umask = "umask";
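+     * // In practice, permissions and umask are POSIX octal strings such as "0757" and "0057".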
+     *
+     * Response<PathInfo> response = client.createWithResponse(permissions, umask, httpHeaders,
+     *     Collections.singletonMap("metadata", "value"), requestConditions, timeout,
+     *     new Context(key1, value1));
+     * System.out.printf("Last Modified Time:%s", response.getValue().getLastModified());
+     * 
+ * * *

For more information see the * Azure @@ -213,7 +237,12 @@ public Response createWithResponse(String permissions, String umask, P * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.setMetadata#Map} + * + *
+     * client.setMetadata(Collections.singletonMap("metadata", "value"));
+     * System.out.println("Set metadata completed");
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -232,7 +261,15 @@ public void setMetadata(Map metadata) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.setMetadata#Map-DataLakeRequestConditions-Duration-Context} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().setLeaseId(leaseId);
+     *
+     * client.setMetadataWithResponse(Collections.singletonMap("metadata", "value"), requestConditions, timeout,
+     *     new Context(key2, value2));
+     * System.out.println("Set metadata completed");
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -258,7 +295,13 @@ public Response setMetadataWithResponse(Map metadata, * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.setHttpHeaders#PathHttpHeaders} + * + *
+     * client.setHttpHeaders(new PathHttpHeaders()
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary"));
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -276,7 +319,17 @@ public void setHttpHeaders(PathHttpHeaders headers) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.setHttpHeadersWithResponse#PathHttpHeaders-DataLakeRequestConditions-Duration-Context} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().setLeaseId(leaseId);
+     *
+     * Response<Void> response = client.setHttpHeadersWithResponse(new PathHttpHeaders()
+     *     .setContentLanguage("en-US")
+     *     .setContentType("binary"), requestConditions, timeout, new Context(key2, value2));
+     * System.out.printf("Set HTTP headers completed with status %d%n",
+     *     response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -300,7 +353,20 @@ public Response setHttpHeadersWithResponse(PathHttpHeaders headers, * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.setAccessControlList#List-String-String} + * + *
+     * PathAccessControlEntry pathAccessControlEntry = new PathAccessControlEntry()
+     *     .setEntityId("entityId")
+     *     .setPermissions(new RolePermissions().setReadPermission(true));
+     * List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>();
+     * pathAccessControlEntries.add(pathAccessControlEntry);
+     * String group = "group";
+     * String owner = "owner";
+     *
+     * System.out.printf("Last Modified Time: %s", client.setAccessControlList(pathAccessControlEntries, group, owner)
+     *     .getLastModified());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -320,7 +386,22 @@ public PathInfo setAccessControlList(List accessControlL * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.setAccessControlListWithResponse#List-String-String-DataLakeRequestConditions-Duration-Context} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().setLeaseId(leaseId);
+     * PathAccessControlEntry pathAccessControlEntry = new PathAccessControlEntry()
+     *     .setEntityId("entityId")
+     *     .setPermissions(new RolePermissions().setReadPermission(true));
+     * List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>();
+     * pathAccessControlEntries.add(pathAccessControlEntry);
+     * String group = "group";
+     * String owner = "owner";
+     *
+     * Response<PathInfo> response = client.setAccessControlListWithResponse(pathAccessControlEntries, group, owner,
+     *     requestConditions, timeout, new Context(key2, value2));
+     * System.out.printf("Last Modified Time: %s", response.getValue().getLastModified());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -347,7 +428,19 @@ public Response setAccessControlListWithResponse(ListCode Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.setPermissions#PathPermissions-String-String} + * + *
+     * PathPermissions permissions = new PathPermissions()
+     *     .setGroup(new RolePermissions().setExecutePermission(true).setReadPermission(true))
+     *     .setOwner(new RolePermissions().setExecutePermission(true).setReadPermission(true).setWritePermission(true))
+     *     .setOther(new RolePermissions().setReadPermission(true));
+     * String group = "group";
+     * String owner = "owner";
+     *
+     * System.out.printf("Last Modified Time: %s", client.setPermissions(permissions, group, owner)
+     *     .getLastModified());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -367,7 +460,21 @@ public PathInfo setPermissions(PathPermissions permissions, String group, String * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.setPermissionsWithResponse#PathPermissions-String-String-DataLakeRequestConditions-Duration-Context} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().setLeaseId(leaseId);
+     * PathPermissions permissions = new PathPermissions()
+     *     .setGroup(new RolePermissions().setExecutePermission(true).setReadPermission(true))
+     *     .setOwner(new RolePermissions().setExecutePermission(true).setReadPermission(true).setWritePermission(true))
+     *     .setOther(new RolePermissions().setReadPermission(true));
+     * String group = "group";
+     * String owner = "owner";
+     *
+     * Response<PathInfo> response = client.setPermissionsWithResponse(permissions, group, owner, requestConditions,
+     *     timeout, new Context(key2, value2));
+     * System.out.printf("Last Modified Time: %s", response.getValue().getLastModified());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -394,7 +501,20 @@ public Response setPermissionsWithResponse(PathPermissions permissions * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.setAccessControlRecursive#List} + * + *
+     * PathAccessControlEntry pathAccessControlEntry = new PathAccessControlEntry()
+     *     .setEntityId("entityId")
+     *     .setPermissions(new RolePermissions().setReadPermission(true));
+     * List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>();
+     * pathAccessControlEntries.add(pathAccessControlEntry);
+     *
+     * AccessControlChangeResult response = client.setAccessControlRecursive(pathAccessControlEntries);
+     *
+     * System.out.printf("Successful changed file operations: %d",
+     *     response.getCounters().getChangedFilesCount());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -416,7 +536,36 @@ public AccessControlChangeResult setAccessControlRecursive(ListCode Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.setAccessControlRecursiveWithResponse#PathSetAccessControlRecursiveOptions-Duration-Context} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().setLeaseId(leaseId);
+     * PathAccessControlEntry pathAccessControlEntry = new PathAccessControlEntry()
+     *     .setEntityId("entityId")
+     *     .setPermissions(new RolePermissions().setReadPermission(true));
+     * List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>();
+     * pathAccessControlEntries.add(pathAccessControlEntry);
+     *
+     * Integer batchSize = 2;
+     * Integer maxBatches = 10;
+     * boolean continueOnFailure = false;
+     * String continuationToken = null;
+     * Consumer<Response<AccessControlChanges>> progressHandler =
+     *     response -> System.out.println("Received response");
+     *
+     * PathSetAccessControlRecursiveOptions options =
+     *     new PathSetAccessControlRecursiveOptions(pathAccessControlEntries)
+     *         .setBatchSize(batchSize)
+     *         .setMaxBatches(maxBatches)
+     *         .setContinueOnFailure(continueOnFailure)
+     *         .setContinuationToken(continuationToken)
+     *         .setProgressHandler(progressHandler);
+     *
+     * Response<AccessControlChangeResult> response = client.setAccessControlRecursiveWithResponse(options, timeout,
+     *     new Context(key2, value2));
+     * System.out.printf("Successful changed file operations: %d",
+     *     response.getValue().getCounters().getChangedFilesCount());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -446,7 +595,20 @@ public Response setAccessControlRecursiveWithResponse * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursive#List} + * + *
+     * PathAccessControlEntry pathAccessControlEntry = new PathAccessControlEntry()
+     *     .setEntityId("entityId")
+     *     .setPermissions(new RolePermissions().setReadPermission(true));
+     * List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>();
+     * pathAccessControlEntries.add(pathAccessControlEntry);
+     *
+     * AccessControlChangeResult response = client.updateAccessControlRecursive(pathAccessControlEntries);
+     *
+     * System.out.printf("Successful changed file operations: %d",
+     *     response.getCounters().getChangedFilesCount());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -468,7 +630,36 @@ public AccessControlChangeResult updateAccessControlRecursive(ListCode Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.updateAccessControlRecursiveWithResponse#PathUpdateAccessControlRecursiveOptions-Duration-Context} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().setLeaseId(leaseId);
+     * PathAccessControlEntry pathAccessControlEntry = new PathAccessControlEntry()
+     *     .setEntityId("entityId")
+     *     .setPermissions(new RolePermissions().setReadPermission(true));
+     * List<PathAccessControlEntry> pathAccessControlEntries = new ArrayList<>();
+     * pathAccessControlEntries.add(pathAccessControlEntry);
+     *
+     * Integer batchSize = 2;
+     * Integer maxBatches = 10;
+     * boolean continueOnFailure = false;
+     * String continuationToken = null;
+     * Consumer<Response<AccessControlChanges>> progressHandler =
+     *     response -> System.out.println("Received response");
+     *
+     * PathUpdateAccessControlRecursiveOptions options =
+     *     new PathUpdateAccessControlRecursiveOptions(pathAccessControlEntries)
+     *         .setBatchSize(batchSize)
+     *         .setMaxBatches(maxBatches)
+     *         .setContinueOnFailure(continueOnFailure)
+     *         .setContinuationToken(continuationToken)
+     *         .setProgressHandler(progressHandler);
+     *
+     * Response<AccessControlChangeResult> response = client.updateAccessControlRecursiveWithResponse(options, timeout,
+     *     new Context(key2, value2));
+     * System.out.printf("Successful changed file operations: %d",
+     *     response.getValue().getCounters().getChangedFilesCount());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -498,7 +689,19 @@ public Response updateAccessControlRecursiveWithRespo * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursive#List} + * + *
+     * PathRemoveAccessControlEntry pathAccessControlEntry = new PathRemoveAccessControlEntry()
+     *     .setEntityId("entityId");
+     * List<PathRemoveAccessControlEntry> pathAccessControlEntries = new ArrayList<>();
+     * pathAccessControlEntries.add(pathAccessControlEntry);
+     *
+     * AccessControlChangeResult response = client.removeAccessControlRecursive(pathAccessControlEntries);
+     *
+     * System.out.printf("Successful changed file operations: %d",
+     *     response.getCounters().getChangedFilesCount());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -521,7 +724,35 @@ public AccessControlChangeResult removeAccessControlRecursive( * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.removeAccessControlRecursiveWithResponse#PathRemoveAccessControlRecursiveOptions-Duration-Context} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().setLeaseId(leaseId);
+     * PathRemoveAccessControlEntry pathAccessControlEntry = new PathRemoveAccessControlEntry()
+     *     .setEntityId("entityId");
+     * List<PathRemoveAccessControlEntry> pathAccessControlEntries = new ArrayList<>();
+     * pathAccessControlEntries.add(pathAccessControlEntry);
+     *
+     * Integer batchSize = 2;
+     * Integer maxBatches = 10;
+     * boolean continueOnFailure = false;
+     * String continuationToken = null;
+     * Consumer<Response<AccessControlChanges>> progressHandler =
+     *     response -> System.out.println("Received response");
+     *
+     * PathRemoveAccessControlRecursiveOptions options =
+     *     new PathRemoveAccessControlRecursiveOptions(pathAccessControlEntries)
+     *         .setBatchSize(batchSize)
+     *         .setMaxBatches(maxBatches)
+     *         .setContinueOnFailure(continueOnFailure)
+     *         .setContinuationToken(continuationToken)
+     *         .setProgressHandler(progressHandler);
+     *
+     * Response<AccessControlChangeResult> response = client.removeAccessControlRecursiveWithResponse(options, timeout,
+     *     new Context(key2, value2));
+     * System.out.printf("Successful changed file operations: %d",
+     *     response.getValue().getCounters().getChangedFilesCount());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -552,7 +783,14 @@ public Response removeAccessControlRecursiveWithRespo * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.getAccessControl} + * + *
+     * PathAccessControl response = client.getAccessControl();
+     * System.out.printf("Access Control List: %s, Group: %s, Owner: %s, Permissions: %s",
+     *     PathAccessControlEntry.serializeList(response.getAccessControlList()), response.getGroup(),
+     *     response.getOwner(), response.getPermissions());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -569,7 +807,21 @@ public PathAccessControl getAccessControl() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.getAccessControlWithResponse#boolean-DataLakeRequestConditions-Duration-Context} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().setLeaseId(leaseId);
+     * boolean userPrincipalNameReturned = false;
+     *
+     * Response<PathAccessControl> response = client.getAccessControlWithResponse(userPrincipalNameReturned,
+     *     requestConditions, timeout, new Context(key1, value1));
+     *
+     * PathAccessControl pac = response.getValue();
+     *
+     * System.out.printf("Access Control List: %s, Group: %s, Owner: %s, Permissions: %s",
+     *     PathAccessControlEntry.serializeList(pac.getAccessControlList()), pac.getGroup(), pac.getOwner(),
+     *     pac.getPermissions());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -595,7 +847,12 @@ public Response getAccessControlWithResponse(boolean userPrin * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.getProperties} + * + *
+     * System.out.printf("Creation Time: %s, Size: %d%n", client.getProperties().getCreationTime(),
+     *     client.getProperties().getFileSize());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -612,7 +869,17 @@ public PathProperties getProperties() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.getPropertiesWithResponse#DataLakeRequestConditions-Duration-Context} + * + *
+     * DataLakeRequestConditions requestConditions = new DataLakeRequestConditions().setLeaseId(leaseId);
+     *
+     * Response<PathProperties> response = client.getPropertiesWithResponse(requestConditions, timeout,
+     *     new Context(key2, value2));
+     *
+     * System.out.printf("Creation Time: %s, Size: %d%n", response.getValue().getCreationTime(),
+     *     response.getValue().getFileSize());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -640,7 +907,11 @@ public Response getPropertiesWithResponse(DataLakeRequestConditi * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.exists} + * + *
+     * System.out.printf("Exists? %b%n", client.exists());
+     * 
+ * * * @return true if the path exists, false if it doesn't */ @@ -657,7 +928,11 @@ public Boolean exists() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.existsWithResponse#Duration-Context} + * + *
+     * System.out.printf("Exists? %b%n", client.existsWithResponse(timeout, new Context(key2, value2)).getValue());
+     * 
+ * * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. @@ -741,7 +1016,17 @@ BlockBlobClient getBlockBlobClient() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas#DataLakeServiceSasSignatureValues-UserDelegationKey} + * + *
+     * OffsetDateTime myExpiryTime = OffsetDateTime.now().plusDays(1);
+     * PathSasPermission myPermission = new PathSasPermission().setReadPermission(true);
+     *
+     * DataLakeServiceSasSignatureValues myValues = new DataLakeServiceSasSignatureValues(myExpiryTime, myPermission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateUserDelegationSas(myValues, userDelegationKey);
+     * 
+ * * * @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues} * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. @@ -762,7 +1047,17 @@ public String generateUserDelegationSas(DataLakeServiceSasSignatureValues dataLa * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.generateUserDelegationSas#DataLakeServiceSasSignatureValues-UserDelegationKey-String-Context} + * + *
+     * OffsetDateTime myExpiryTime = OffsetDateTime.now().plusDays(1);
+     * PathSasPermission myPermission = new PathSasPermission().setReadPermission(true);
+     *
+     * DataLakeServiceSasSignatureValues myValues = new DataLakeServiceSasSignatureValues(myExpiryTime, myPermission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateUserDelegationSas(myValues, userDelegationKey, accountName, new Context("key", "value"));
+     * 
+ * * * @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues} * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. @@ -786,7 +1081,17 @@ public String generateUserDelegationSas(DataLakeServiceSasSignatureValues dataLa * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.generateSas#DataLakeServiceSasSignatureValues} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * PathSasPermission permission = new PathSasPermission().setReadPermission(true);
+     *
+     * DataLakeServiceSasSignatureValues values = new DataLakeServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateSas(values); // Client must be authenticated via StorageSharedKeyCredential
+     * 
+ * * * @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues} * @@ -803,7 +1108,18 @@ public String generateSas(DataLakeServiceSasSignatureValues dataLakeServiceSasSi * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClient.generateSas#DataLakeServiceSasSignatureValues-Context} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * PathSasPermission permission = new PathSasPermission().setReadPermission(true);
+     *
+     * DataLakeServiceSasSignatureValues values = new DataLakeServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * client.generateSas(values, new Context("key", "value"));
+     * 
+ * * * @param dataLakeServiceSasSignatureValues {@link DataLakeServiceSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakePathClientBuilder.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakePathClientBuilder.java index 5428c4a1f5430..e9eac55b6e746 100644 --- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakePathClientBuilder.java +++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakePathClientBuilder.java @@ -91,7 +91,14 @@ public DataLakePathClientBuilder() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClientBuilder.buildFileClient} + * + *
+     * DataLakeFileClient client = new DataLakePathClientBuilder()
+     *     .endpoint(endpoint)
+     *     .credential(storageSharedKeyCredential)
+     *     .buildFileClient();
+     * 
+ * * * @return a {@link DataLakeFileClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint} or {@code pathName} is {@code null}. @@ -107,7 +114,14 @@ public DataLakeFileClient buildFileClient() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClientBuilder.buildFileAsyncClient} + * + *
+     * DataLakeFileAsyncClient client = new DataLakePathClientBuilder()
+     *     .endpoint(endpoint)
+     *     .credential(storageSharedKeyCredential)
+     *     .buildFileAsyncClient();
+     * 
+ * * * @return a {@link DataLakeFileAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint} or {@code pathName} is {@code null}. @@ -141,7 +155,14 @@ public DataLakeFileAsyncClient buildFileAsyncClient() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClientBuilder.buildDirectoryClient} + * + *
+     * DataLakeDirectoryClient client = new DataLakePathClientBuilder()
+     *     .endpoint(endpoint)
+     *     .credential(storageSharedKeyCredential)
+     *     .buildDirectoryClient();
+     * 
+ * * * @return a {@link DataLakeDirectoryClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint} or {@code pathName} is {@code null}. @@ -156,7 +177,14 @@ public DataLakeDirectoryClient buildDirectoryClient() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakePathClientBuilder.buildDirectoryAsyncClient} + * + *
+     * DataLakeDirectoryAsyncClient client = new DataLakePathClientBuilder()
+     *     .endpoint(endpoint)
+     *     .credential(storageSharedKeyCredential)
+     *     .buildDirectoryAsyncClient();
+     * 
+ * * * @return a {@link DataLakeDirectoryAsyncClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint} or {@code pathName} is {@code null}. diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeServiceAsyncClient.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeServiceAsyncClient.java index 7c58bd37b8d50..f1a4cdb5af535 100644 --- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeServiceAsyncClient.java +++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeServiceAsyncClient.java @@ -100,7 +100,11 @@ public class DataLakeServiceAsyncClient { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.getFileSystemAsyncClient#String} + * + *
+     * DataLakeFileSystemAsyncClient dataLakeFileSystemAsyncClient = client.getFileSystemAsyncClient("fileSystemName");
+     * 
+ * * * @param fileSystemName The name of the file system to point to. A value of null or empty string will be * interpreted as pointing to the root file system and will be replaced by "$root". @@ -139,7 +143,12 @@ public DataLakeServiceVersion getServiceVersion() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.createFileSystem#String} + * + *
+     * DataLakeFileSystemAsyncClient dataLakeFileSystemAsyncClient =
+     *     client.createFileSystem("fileSystemName").block();
+     * 
+ * * * @param fileSystemName Name of the file system to create * @return A {@link Mono} containing a {@link DataLakeFileSystemAsyncClient} used to interact with the file system @@ -161,7 +170,14 @@ public Mono createFileSystem(String fileSystemNam * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.createFileSystemWithResponse#String-Map-PublicAccessType} + * + *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     *
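+     * // PublicAccessType.CONTAINER allows anonymous read access to the file system and the paths within it.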
+     * DataLakeFileSystemAsyncClient dataLakeFileSystemAsyncClient = client
+     *     .createFileSystemWithResponse("fileSystemName", metadata, PublicAccessType.CONTAINER).block().getValue();
+     * 
+ * * * @param fileSystemName Name of the file system to create * @param metadata Metadata to associate with the file system. If there is leading or trailing whitespace in any @@ -190,7 +206,13 @@ public Mono> createFileSystemWithRespons * Docs. *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.deleteFileSystem#String} + * + *
+     * client.deleteFileSystem("fileSystemName").subscribe(
+     *     response -> System.out.printf("Delete file system completed%n"),
+     *     error -> System.out.printf("Delete file system failed: %s%n", error));
+     * 
+ * * * @param fileSystemName Name of the file system to delete * @return A reactive response signalling completion. @@ -211,7 +233,12 @@ public Mono deleteFileSystem(String fileSystemName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.deleteFileSystemWithResponse#String-DataLakeRequestConditions} + * + *
+     * client.deleteFileSystemWithResponse("fileSystemName", new DataLakeRequestConditions()).subscribe(response ->
+     *     System.out.printf("Delete file system completed with status %d%n", response.getStatusCode()));
+     * 
+ * * * @param fileSystemName Name of the file system to delete * @param requestConditions {@link DataLakeRequestConditions} @@ -242,7 +269,11 @@ public String getAccountUrl() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.listFileSystems} + * + *
+     * client.listFileSystems().subscribe(fileSystem -> System.out.printf("Name: %s%n", fileSystem.getName()));
+     * 
+ * * * @return A reactive response emitting the list of file systems. */ @@ -261,7 +292,15 @@ public PagedFlux listFileSystems() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.listFileSystems#ListFileSystemsOptions} + * + *
+     * ListFileSystemsOptions options = new ListFileSystemsOptions()
+     *     .setPrefix("fileSystemNamePrefixToMatch")
+     *     .setDetails(new FileSystemListDetails().setRetrieveMetadata(true));
+     *
+     * client.listFileSystems(options).subscribe(fileSystem -> System.out.printf("Name: %s%n", fileSystem.getName()));
+     * 
+ * * * @param options A {@link ListFileSystemsOptions} which specifies what data should be returned by the service. * @return A reactive response emitting the list of file systems. @@ -316,7 +355,14 @@ PagedFlux listFileSystemsWithOptionalTimeout(ListFileSystemsOpti * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.getProperties} + * + *
+     * client.getProperties().subscribe(response ->
+     *     System.out.printf("Hour metrics enabled: %b, Minute metrics enabled: %b%n",
+     *         response.getHourMetrics().isEnabled(),
+     *         response.getMinuteMetrics().isEnabled()));
+     * 
+ * * * @return A reactive response containing the storage account properties. */ @@ -335,7 +381,14 @@ public Mono getProperties() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.getPropertiesWithResponse} + * + *
+     * client.getPropertiesWithResponse().subscribe(response ->
+     *     System.out.printf("Hour metrics enabled: %b, Minute metrics enabled: %b%n",
+     *         response.getValue().getHourMetrics().isEnabled(),
+     *         response.getValue().getMinuteMetrics().isEnabled()));
+     * 
+ * * * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} contains the storage * account properties. @@ -362,7 +415,28 @@ public Mono> getPropertiesWithResponse() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.setProperties#DataLakeServiceProperties} + * + *
+     * DataLakeRetentionPolicy loggingRetentionPolicy = new DataLakeRetentionPolicy().setEnabled(true).setDays(3);
+     * DataLakeRetentionPolicy metricsRetentionPolicy = new DataLakeRetentionPolicy().setEnabled(true).setDays(1);
+     *
+     * DataLakeServiceProperties properties = new DataLakeServiceProperties()
+     *     .setLogging(new DataLakeAnalyticsLogging()
+     *         .setWrite(true)
+     *         .setDelete(true)
+     *         .setRetentionPolicy(loggingRetentionPolicy))
+     *     .setHourMetrics(new DataLakeMetrics()
+     *         .setEnabled(true)
+     *         .setRetentionPolicy(metricsRetentionPolicy))
+     *     .setMinuteMetrics(new DataLakeMetrics()
+     *         .setEnabled(true)
+     *         .setRetentionPolicy(metricsRetentionPolicy));
+     *
+     * client.setProperties(properties).subscribe(
+     *     response -> System.out.printf("Setting properties completed%n"),
+     *     error -> System.out.printf("Setting properties failed: %s%n", error));
+     * 
+ * * * @param properties Configures the service. * @return A {@link Mono} containing the storage account properties. @@ -385,7 +459,27 @@ public Mono setProperties(DataLakeServiceProperties properties) { * If CORS policies are set, CORS parameters that are not set default to the empty string.

*

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.setPropertiesWithResponse#DataLakeServiceProperties} + * + *
+     * loggingRetentionPolicy = new DataLakeRetentionPolicy().setEnabled(true).setDays(3);
+     * metricsRetentionPolicy = new DataLakeRetentionPolicy().setEnabled(true).setDays(1);
+     *
+     * properties = new DataLakeServiceProperties()
+     *     .setLogging(new DataLakeAnalyticsLogging()
+     *         .setWrite(true)
+     *         .setDelete(true)
+     *         .setRetentionPolicy(loggingRetentionPolicy))
+     *     .setHourMetrics(new DataLakeMetrics()
+     *         .setEnabled(true)
+     *         .setRetentionPolicy(metricsRetentionPolicy))
+     *     .setMinuteMetrics(new DataLakeMetrics()
+     *         .setEnabled(true)
+     *         .setRetentionPolicy(metricsRetentionPolicy));
+     *
+     * client.setPropertiesWithResponse(properties).subscribe(response ->
+     *     System.out.printf("Setting properties completed with status %d%n", response.getStatusCode()));
+     * 
+ * * * @param properties Configures the service. * @return A {@link Mono} containing the storage account properties. @@ -406,7 +500,12 @@ public Mono> setPropertiesWithResponse(DataLakeServiceProperties * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.getUserDelegationKey#OffsetDateTime-OffsetDateTime} + * + *
+     * client.getUserDelegationKey(delegationKeyStartTime, delegationKeyExpiryTime).subscribe(response ->
+     *     System.out.printf("User delegation key: %s%n", response.getValue()));
+     * 
+ * * * @param start Start time for the key's validity. Null indicates immediate start. * @param expiry Expiration of the key's validity. @@ -429,7 +528,12 @@ public Mono getUserDelegationKey(OffsetDateTime start, Offset * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.getUserDelegationKeyWithResponse#OffsetDateTime-OffsetDateTime} + * + *
+     * client.getUserDelegationKeyWithResponse(delegationKeyStartTime, delegationKeyExpiryTime).subscribe(response ->
+     *     System.out.printf("User delegation key: %s%n", response.getValue().getValue()));
+     * 
+ * * * @param start Start time for the key's validity. Null indicates immediate start. * @param expiry Expiration of the key's validity. @@ -467,7 +571,22 @@ public String getAccountName() { * *

The snippet below generates a SAS that lasts for two days and gives the user read and list access to file * systems and file shares.

- * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.generateAccountSas#AccountSasSignatureValues} + * + *
+     * AccountSasPermission permissions = new AccountSasPermission()
+     *     .setListPermission(true)
+     *     .setReadPermission(true);
+     * AccountSasResourceType resourceTypes = new AccountSasResourceType().setContainer(true);
+     * AccountSasService services = new AccountSasService().setBlobAccess(true).setFileAccess(true);
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plus(Duration.ofDays(2));
+     *
+     * AccountSasSignatureValues sasValues =
+     *     new AccountSasSignatureValues(expiryTime, permissions, services, resourceTypes);
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * String sas = client.generateAccountSas(sasValues);
+     * 
+ * * * @param accountSasSignatureValues {@link AccountSasSignatureValues} * @@ -484,7 +603,22 @@ public String generateAccountSas(AccountSasSignatureValues accountSasSignatureVa * *

The snippet below generates a SAS that lasts for two days and gives the user read and list access to file * systems and file shares.

- * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.generateAccountSas#AccountSasSignatureValues-Context} + * + *
+     * AccountSasPermission permissions = new AccountSasPermission()
+     *     .setListPermission(true)
+     *     .setReadPermission(true);
+     * AccountSasResourceType resourceTypes = new AccountSasResourceType().setContainer(true);
+     * AccountSasService services = new AccountSasService().setBlobAccess(true).setFileAccess(true);
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plus(Duration.ofDays(2));
+     *
+     * AccountSasSignatureValues sasValues =
+     *     new AccountSasSignatureValues(expiryTime, permissions, services, resourceTypes);
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * String sas = client.generateAccountSas(sasValues, new Context("key", "value"));
+     * 
+ * * * @param accountSasSignatureValues {@link AccountSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. @@ -503,7 +637,19 @@ public String generateAccountSas(AccountSasSignatureValues accountSasSignatureVa * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.undeleteFileSystem#String-String} + * + *
+     * ListFileSystemsOptions listFileSystemsOptions = new ListFileSystemsOptions();
+     * listFileSystemsOptions.getDetails().setRetrieveDeleted(true);
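+     * // Listing and restoring deleted file systems requires container soft delete to be enabled on the account.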
+     * client.listFileSystems(listFileSystemsOptions).flatMap(
+     *     deletedFileSystem -> {
+     *         Mono<DataLakeFileSystemAsyncClient> fileSystemClient = client.undeleteFileSystem(
+     *             deletedFileSystem.getName(), deletedFileSystem.getVersion());
+     *         return fileSystemClient;
+     *     }
+     * ).then().block();
+     * 
+ * * * @param deletedFileSystemName The name of the previously deleted file system. * @param deletedFileSystemVersion The version of the previously deleted file system. @@ -529,7 +675,20 @@ public Mono undeleteFileSystem( * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.undeleteFileSystemWithResponse#FileSystemUndeleteOptions} + * + *
+     * ListFileSystemsOptions listFileSystemsOptions = new ListFileSystemsOptions();
+     * listFileSystemsOptions.getDetails().setRetrieveDeleted(true);
+     * client.listFileSystems(listFileSystemsOptions).flatMap(
+     *     deletedFileSystem -> {
+     *         Mono<DataLakeFileSystemAsyncClient> fileSystemClient = client.undeleteFileSystemWithResponse(
+     *             new FileSystemUndeleteOptions(deletedFileSystem.getName(), deletedFileSystem.getVersion()))
+     *             .map(Response::getValue);
+     *         return fileSystemClient;
+     *     }
+     * ).then().block();
+     * 
+ * * * @param options {@link FileSystemUndeleteOptions}. * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} contains a {@link @@ -554,7 +713,8 @@ public Mono> undeleteFileSystemWithRespo // * // *

Code Samples

// * -// * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.renameFileSystem#String-String} +// * +// * // * // * @param sourceFileSystemName The current name of the file system. // * @param destinationFileSystemName The new name of the file system. @@ -573,7 +733,8 @@ public Mono> undeleteFileSystemWithRespo // * // *

Code Samples

// * -// * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceAsyncClient.renameFileSystemWithResponse#FileSystemRenameOptions} +// * +// * // * // * @param sourceFileSystemName The current name of the file system. // * @param options {@link FileSystemRenameOptions} diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeServiceClient.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeServiceClient.java index 578a7de240f3e..2728dc1237128 100644 --- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeServiceClient.java +++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/DataLakeServiceClient.java @@ -67,7 +67,11 @@ public class DataLakeServiceClient { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.getFileSystemClient#String} + * + *
+     * DataLakeFileSystemClient dataLakeFileSystemClient = client.getFileSystemClient("fileSystemName");
+     * 
+ * * * @param fileSystemName The name of the file system to point to. * @return A {@link DataLakeFileSystemClient} object pointing to the specified file system @@ -102,7 +106,11 @@ public DataLakeServiceVersion getServiceVersion() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.createFileSystem#String} + * + *
+     * DataLakeFileSystemClient dataLakeFileSystemClient = client.createFileSystem("fileSystemName");
+     * 
+ * * * @param fileSystemName Name of the file system to create * @return The {@link DataLakeFileSystemClient} used to interact with the file system created. @@ -119,7 +127,18 @@ public DataLakeFileSystemClient createFileSystem(String fileSystemName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.createFileSystemWithResponse#String-Map-PublicAccessType-Context} + * + *
+     * Map<String, String> metadata = Collections.singletonMap("metadata", "value");
+     * Context context = new Context("Key", "Value");
+     *
+     * DataLakeFileSystemClient dataLakeFileSystemClient = client.createFileSystemWithResponse(
+     *     "fileSystemName",
+     *     metadata,
+     *     PublicAccessType.CONTAINER,
+     *     context).getValue();
+     * 
+ * * * @param fileSystemName Name of the file system to create * @param metadata Metadata to associate with the file system. If there is leading or trailing whitespace in any @@ -145,7 +164,16 @@ public Response createFileSystemWithResponse(String fi * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.deleteFileSystem#String} + * + *
+     * try {
+     *     client.deleteFileSystem("fileSystemName");
+     *     System.out.printf("Delete file system completed with status %n");
+     * } catch (UnsupportedOperationException error) {
+     *     System.out.printf("Delete file system failed: %s%n", error);
+     * }
+     * 
+ * * * @param fileSystemName Name of the file system to delete */ @@ -161,7 +189,14 @@ public void deleteFileSystem(String fileSystemName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.deleteFileSystemWithResponse#String-DataLakeRequestConditions-Context} + * + *
+     * Context context = new Context("Key", "Value");
+     * System.out.printf("Delete file system completed with status %d%n",
+     *     client.deleteFileSystemWithResponse("fileSystemName", new DataLakeRequestConditions(), context)
+     *         .getStatusCode());
+     * 
+ * * * @param fileSystemName Name of the file system to delete * @param requestConditions {@link DataLakeRequestConditions} @@ -190,7 +225,11 @@ public String getAccountUrl() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.listFileSystems} + * + *
+     * client.listFileSystems().forEach(fileSystem -> System.out.printf("Name: %s%n", fileSystem.getName()));
+     * 
+ * * * @return The list of file systems. */ @@ -206,7 +245,16 @@ public PagedIterable listFileSystems() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.listFileSystems#ListFileSystemsOptions-Duration} + * + *
+     * ListFileSystemsOptions options = new ListFileSystemsOptions()
+     *     .setPrefix("filSystemNamePrefixToMatch")
+     *     .setDetails(new FileSystemListDetails().setRetrieveMetadata(true));
+     *
+     * client.listFileSystems(options, timeout).forEach(fileSystem -> System.out.printf("Name: %s%n",
+     *     fileSystem.getName()));
+     * 
+ * * * @param options A {@link ListFileSystemsOptions} which specifies what data should be returned by the service. * If iterating by page, the page size passed to byPage methods such as @@ -224,7 +272,15 @@ public PagedIterable listFileSystems(ListFileSystemsOptions opti * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.getProperties} + * + *
+     * DataLakeServiceProperties properties = client.getProperties();
+     *
+     * System.out.printf("Hour metrics enabled: %b, Minute metrics enabled: %b%n",
+     *     properties.getHourMetrics().isEnabled(),
+     *     properties.getMinuteMetrics().isEnabled());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -241,7 +297,16 @@ public DataLakeServiceProperties getProperties() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.getPropertiesWithResponse#Duration-Context} + * + *
+     * Context context = new Context("Key", "Value");
+     * properties = client.getPropertiesWithResponse(timeout, context).getValue();
+     *
+     * System.out.printf("Hour metrics enabled: %b, Minute metrics enabled: %b%n",
+     *     properties.getHourMetrics().isEnabled(),
+     *     properties.getMinuteMetrics().isEnabled());
+     * 
+ * * *

For more information, see the * Azure Docs

@@ -268,7 +333,31 @@ public Response getPropertiesWithResponse(Duration ti * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.setProperties#DataLakeServiceProperties} + * + *
+     * DataLakeRetentionPolicy loggingRetentionPolicy = new DataLakeRetentionPolicy().setEnabled(true).setDays(3);
+     * DataLakeRetentionPolicy metricsRetentionPolicy = new DataLakeRetentionPolicy().setEnabled(true).setDays(1);
+     *
+     * DataLakeServiceProperties properties = new DataLakeServiceProperties()
+     *     .setLogging(new DataLakeAnalyticsLogging()
+     *         .setWrite(true)
+     *         .setDelete(true)
+     *         .setRetentionPolicy(loggingRetentionPolicy))
+     *     .setHourMetrics(new DataLakeMetrics()
+     *         .setEnabled(true)
+     *         .setRetentionPolicy(metricsRetentionPolicy))
+     *     .setMinuteMetrics(new DataLakeMetrics()
+     *         .setEnabled(true)
+     *         .setRetentionPolicy(metricsRetentionPolicy));
+     *
+     * try {
+     *     client.setProperties(properties);
+     *     System.out.printf("Setting properties completed%n");
+     * } catch (UnsupportedOperationException error) {
+     *     System.out.printf("Setting properties failed: %s%n", error);
+     * }
+     * 
+ * * * @param properties Configures the service. */ @@ -287,7 +376,29 @@ public void setProperties(DataLakeServiceProperties properties) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.setPropertiesWithResponse#DataLakeServiceProperties-Duration-Context} + * + *
+     * loggingRetentionPolicy = new DataLakeRetentionPolicy().setEnabled(true).setDays(3);
+     * metricsRetentionPolicy = new DataLakeRetentionPolicy().setEnabled(true).setDays(1);
+     *
+     * properties = new DataLakeServiceProperties()
+     *     .setLogging(new DataLakeAnalyticsLogging()
+     *         .setWrite(true)
+     *         .setDelete(true)
+     *         .setRetentionPolicy(loggingRetentionPolicy))
+     *     .setHourMetrics(new DataLakeMetrics()
+     *         .setEnabled(true)
+     *         .setRetentionPolicy(metricsRetentionPolicy))
+     *     .setMinuteMetrics(new DataLakeMetrics()
+     *         .setEnabled(true)
+     *         .setRetentionPolicy(metricsRetentionPolicy));
+     *
+     * Context context = new Context("Key", "Value");
+     *
+     * System.out.printf("Setting properties completed with status %d%n",
+     *     client.setPropertiesWithResponse(properties, timeout, context).getStatusCode());
+     * 
+ * * * @param properties Configures the service. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -308,7 +419,12 @@ public Response setPropertiesWithResponse(DataLakeServiceProperties proper * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.getUserDelegationKey#OffsetDateTime-OffsetDateTime} + * + *
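+     * // A user delegation key can only be requested by a client authenticated with Azure AD (a TokenCredential).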
+     * System.out.printf("User delegation key: %s%n",
+     *     client.getUserDelegationKey(delegationKeyStartTime, delegationKeyExpiryTime));
+     * 
+ * * * @param start Start time for the key's validity. Null indicates immediate start. * @param expiry Expiration of the key's validity. @@ -325,7 +441,12 @@ public UserDelegationKey getUserDelegationKey(OffsetDateTime start, OffsetDateTi * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.getUserDelegationKeyWithResponse#OffsetDateTime-OffsetDateTime-Duration-Context} + * + *
+     * System.out.printf("User delegation key: %s%n",
+     *     client.getUserDelegationKeyWithResponse(delegationKeyStartTime, delegationKeyExpiryTime, timeout, context));
+     * 
+ * * * @param start Start time for the key's validity. Null indicates immediate start. * @param expiry Expiration of the key's validity. @@ -359,7 +480,22 @@ public String getAccountName() { * *

The snippet below generates a SAS that lasts for two days and gives the user read and list access to file * systems and file shares.

- * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.generateAccountSas#AccountSasSignatureValues} + * + *
+     * AccountSasPermission permissions = new AccountSasPermission()
+     *     .setListPermission(true)
+     *     .setReadPermission(true);
+     * AccountSasResourceType resourceTypes = new AccountSasResourceType().setContainer(true);
+     * AccountSasService services = new AccountSasService().setBlobAccess(true).setFileAccess(true);
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plus(Duration.ofDays(2));
+     *
+     * AccountSasSignatureValues sasValues =
+     *     new AccountSasSignatureValues(expiryTime, permissions, services, resourceTypes);
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * String sas = client.generateAccountSas(sasValues);
+     * 
+ * * * @param accountSasSignatureValues {@link AccountSasSignatureValues} * @@ -376,7 +512,22 @@ public String generateAccountSas(AccountSasSignatureValues accountSasSignatureVa * *

The snippet below generates a SAS that lasts for two days and gives the user read and list access to file * systems and file shares.

- * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.generateAccountSas#AccountSasSignatureValues-Context} + * + *
+     * AccountSasPermission permissions = new AccountSasPermission()
+     *     .setListPermission(true)
+     *     .setReadPermission(true);
+     * AccountSasResourceType resourceTypes = new AccountSasResourceType().setContainer(true);
+     * AccountSasService services = new AccountSasService().setBlobAccess(true).setFileAccess(true);
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plus(Duration.ofDays(2));
+     *
+     * AccountSasSignatureValues sasValues =
+     *     new AccountSasSignatureValues(expiryTime, permissions, services, resourceTypes);
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * String sas = client.generateAccountSas(sasValues, new Context("key", "value"));
+     * 
+ * * * @param accountSasSignatureValues {@link AccountSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. @@ -396,7 +547,18 @@ public String generateAccountSas(AccountSasSignatureValues accountSasSignatureVa * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.undeleteFileSystem#String-String} + * + *
+     * ListFileSystemsOptions listFileSystemsOptions = new ListFileSystemsOptions();
+     * listFileSystemsOptions.getDetails().setRetrieveDeleted(true);
+     * client.listFileSystems(listFileSystemsOptions, null).forEach(
+     *     deletedFileSystem -> {
+     *         DataLakeFileSystemClient fileSystemClient = client.undeleteFileSystem(
+     *             deletedFileSystem.getName(), deletedFileSystem.getVersion());
+     *     }
+     * );
+     * 
+ * * * @param deletedFileSystemName The name of the previously deleted file system. * @param deletedFileSystemVersion The version of the previously deleted file system. @@ -420,7 +582,19 @@ public DataLakeFileSystemClient undeleteFileSystem(String deletedFileSystemName, * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.undeleteFileSystemWithResponse#FileSystemUndeleteOptions-Duration-Context} + * + *
+     * ListFileSystemsOptions listFileSystemsOptions = new ListFileSystemsOptions();
+     * listFileSystemsOptions.getDetails().setRetrieveDeleted(true);
+     * client.listFileSystems(listFileSystemsOptions, null).forEach(
+     *     deletedFileSystem -> {
+     *         DataLakeFileSystemClient fileSystemClient = client.undeleteFileSystemWithResponse(
+     *             new FileSystemUndeleteOptions(deletedFileSystem.getName(), deletedFileSystem.getVersion()), timeout,
+     *             context).getValue();
+     *     }
+     * );
+     * 
+ * * * @param options {@link FileSystemUndeleteOptions}. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -444,7 +618,8 @@ public Response undeleteFileSystemWithResponse( // * // *

Code Samples

// * -// * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.renameFileSystem#String-String} +// * +// * // * // * @param sourceFileSystemName The current name of the file system. // * @param destinationFileSystemName The new name of the file system. @@ -461,7 +636,8 @@ public Response undeleteFileSystemWithResponse( // * // *

Code Samples

// * -// * {@codesnippet com.azure.storage.file.datalake.DataLakeServiceClient.renameFileSystemWithResponse#String-FileSystemRenameOptions-Duration-Context} +// * +// * // * // * @param sourceFileSystemName The current name of the file system. // * @param options {@link FileSystemRenameOptions} diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/specialized/DataLakeLeaseAsyncClient.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/specialized/DataLakeLeaseAsyncClient.java index 6bbf941d43380..c8680f2ee18d5 100644 --- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/specialized/DataLakeLeaseAsyncClient.java +++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/specialized/DataLakeLeaseAsyncClient.java @@ -26,11 +26,29 @@ * *

Instantiating a DataLakeLeaseAsyncClient

* - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseClientBuilder.asyncInstantiationWithFile} + * + *
+ * DataLakeLeaseAsyncClient dataLakeLeaseAsyncClient = new DataLakeLeaseClientBuilder()
+ *     .fileAsyncClient(fileAsyncClient)
+ *     .buildAsyncClient();
+ * 
+ * * - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseClientBuilder.asyncInstantiationWithDirectory} + * + *
+ * DataLakeLeaseAsyncClient dataLakeLeaseAsyncClient = new DataLakeLeaseClientBuilder()
+ *     .directoryAsyncClient(directoryAsyncClient)
+ *     .buildAsyncClient();
+ * 
+ * * - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseClientBuilder.asyncInstantiationWithFileSystem} + * + *
+ * DataLakeLeaseAsyncClient dataLakeLeaseAsyncClient = new DataLakeLeaseClientBuilder()
+ *     .fileSystemAsyncClient(dataLakeFileSystemAsyncClient)
+ *     .buildAsyncClient();
+ * 
+ * * *
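As a hedged end-to-end sketch (editorial addition, not part of this patch), the builders above can feed a simple acquire-then-release flow; `fileAsyncClient` is assumed to reference an existing file.

    DataLakeLeaseAsyncClient leaseAsyncClient = new DataLakeLeaseClientBuilder()
        .fileAsyncClient(fileAsyncClient)
        .buildAsyncClient();

    // Acquire a 30-second lease, log the lease ID, then release it.
    leaseAsyncClient.acquireLease(30)
        .doOnNext(leaseId -> System.out.printf("Acquired lease %s%n", leaseId))
        .then(leaseAsyncClient.releaseLease())
        .subscribe(unused -> { }, error -> System.err.println(error), () -> System.out.println("Lease released"));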

View {@link DataLakeLeaseClientBuilder this} for additional ways to construct the client.

* @@ -75,7 +93,11 @@ public String getLeaseId() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseAsyncClient.acquireLease#int} + * + *
+     * client.acquireLease(60).subscribe(response -> System.out.printf("Lease ID is %s%n", response));
+     * 
+ * * * @param duration The duration of the lease between 15 to 60 seconds or -1 for an infinite duration. * @return A reactive response containing the lease ID. @@ -91,7 +113,15 @@ public Mono acquireLease(int duration) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseAsyncClient.acquireLeaseWithResponse#int-RequestConditions} + * + *
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfModifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.acquireLeaseWithResponse(60, modifiedRequestConditions).subscribe(response ->
+     *     System.out.printf("Lease ID is %s%n", response.getValue()));
+     * 
+ * * * @param duration The duration of the lease between 15 to 60 seconds or -1 for an infinite duration. * @param modifiedRequestConditions Standard HTTP Access conditions related to the modification of data. ETag and @@ -111,7 +141,11 @@ public Mono> acquireLeaseWithResponse(int duration, * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseAsyncClient.renewLease} + * + *
+     * client.renewLease().subscribe(response -> System.out.printf("Renewed lease ID is %s%n", response));
+     * 
+ * * * @return A reactive response containing the renewed lease ID. */ @@ -125,7 +159,15 @@ public Mono renewLease() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseAsyncClient.renewLeaseWithResponse#RequestConditions} + * + *
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.renewLeaseWithResponse(modifiedRequestConditions).subscribe(response ->
+     *     System.out.printf("Renewed lease ID is %s%n", response.getValue()));
+     * 
+ * * * @param modifiedRequestConditions Standard HTTP Access conditions related to the modification of data. ETag and * LastModifiedTime are used to construct conditions related to when the resource was changed relative to the given @@ -143,7 +185,11 @@ public Mono> renewLeaseWithResponse(RequestConditions modifiedR * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseAsyncClient.releaseLease} + * + *
+     * client.releaseLease().subscribe(response -> System.out.println("Release lease completed"));

+     * 
+ * * * @return A reactive response signalling completion. */ @@ -157,7 +203,15 @@ public Mono releaseLease() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseAsyncClient.releaseLeaseWithResponse#RequestConditions} + * + *
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.releaseLeaseWithResponse(modifiedRequestConditions).subscribe(response ->
+     *     System.out.printf("Release lease completed with status %d%n", response.getStatusCode()));
+     * 
+ * * * @param modifiedRequestConditions Standard HTTP Access conditions related to the modification of data. ETag and * LastModifiedTime are used to construct conditions related to when the resource was changed relative to the given @@ -175,7 +229,12 @@ public Mono> releaseLeaseWithResponse(RequestConditions modifiedR * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseAsyncClient.breakLease} + * + *
+     * client.breakLease().subscribe(response ->
+     *     System.out.printf("The broken lease has %d seconds remaining on the lease", response));
+     * 
+ * * * @return A reactive response containing the remaining time in the broken lease in seconds. */ @@ -192,7 +251,16 @@ public Mono breakLease() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseAsyncClient.breakLeaseWithResponse#Integer-RequestConditions} + * + *
+     * Integer retainLeaseInSeconds = 5;
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.breakLeaseWithResponse(retainLeaseInSeconds, modifiedRequestConditions).subscribe(response ->
+     *     System.out.printf("The broken lease has %d seconds remaining on the lease", response.getValue()));
+     * 
+ * * * @param breakPeriodInSeconds An optional duration, between 0 and 60 seconds, that the lease should continue before * it is broken. If the break period is longer than the time remaining on the lease the remaining time on the lease @@ -215,7 +283,11 @@ public Mono> breakLeaseWithResponse(Integer breakPeriodInSecon * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseAsyncClient.changeLease#String} + * + *
+     * client.changeLease("proposedId").subscribe(response -> System.out.printf("Changed lease ID is %s%n", response));
+     * 
+ * * * @param proposedId A new lease ID in a valid GUID format. * @return A reactive response containing the new lease ID. @@ -230,7 +302,15 @@ public Mono changeLease(String proposedId) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseAsyncClient.changeLeaseWithResponse#String-RequestConditions} + * + *
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * client.changeLeaseWithResponse("proposedId", modifiedRequestConditions).subscribe(response ->
+     *     System.out.printf("Changed lease ID is %s%n", response.getValue()));
+     * 
+ * * * @param proposedId A new lease ID in a valid GUID format. * @param modifiedRequestConditions Standard HTTP Access conditions related to the modification of data. ETag and diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/specialized/DataLakeLeaseClient.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/specialized/DataLakeLeaseClient.java index fd5c3318aca80..94a60c8f07079 100644 --- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/specialized/DataLakeLeaseClient.java +++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/specialized/DataLakeLeaseClient.java @@ -25,11 +25,29 @@ * *

Instantiating a DataLakeLeaseClient

* - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseClientBuilder.syncInstantiationWithFile} + * + *
+ * DataLakeLeaseClient dataLakeLeaseClient = new DataLakeLeaseClientBuilder()
+ *     .fileClient(fileClient)
+ *     .buildClient();
+ * 
+ * * - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseClientBuilder.syncInstantiationWithDirectory} + * + *
+ * DataLakeLeaseClient dataLakeLeaseClient = new DataLakeLeaseClientBuilder()
+ *     .directoryClient(directoryClient)
+ *     .buildClient();
+ * 
+ * * - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseClientBuilder.syncInstantiationWithFileSystem} + * + *
+ * DataLakeLeaseClient dataLakeLeaseClient = new DataLakeLeaseClientBuilder()
+ *     .fileSystemClient(dataLakeFileSystemClient)
+ *     .buildClient();
+ * 
+ * * *

View {@link DataLakeLeaseClientBuilder this} for additional ways to construct the client.

* @@ -75,7 +93,11 @@ public String getLeaseId() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseClient.acquireLease#int} + * + *
+     * System.out.printf("Lease ID is %s%n", client.acquireLease(60));
+     * 
+ * * * @param duration The duration of the lease between 15 to 60 seconds or -1 for an infinite duration. * @return The lease ID. @@ -91,7 +113,16 @@ public String acquireLease(int duration) { * *
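A small illustrative sketch (not from this patch) of the -1 case mentioned above; `client` mirrors the DataLakeLeaseClient used in the surrounding samples.

    // -1 requests an infinite lease; it stays in effect until explicitly released or broken.
    String infiniteLeaseId = client.acquireLease(-1);
    System.out.printf("Acquired infinite lease %s%n", infiniteLeaseId);
    client.releaseLease();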

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseClient.acquireLeaseWithResponse#int-RequestConditions-Duration-Context} + * + *
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfModifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * System.out.printf("Lease ID is %s%n", client
+     *     .acquireLeaseWithResponse(60, modifiedRequestConditions, timeout, new Context(key, value))
+     *     .getValue());
+     * 
+ * * * @param duration The duration of the lease between 15 to 60 seconds or -1 for an infinite duration. * @param modifiedRequestConditions Standard HTTP Access conditions related to the modification of data. ETag and @@ -113,7 +144,11 @@ public Response acquireLeaseWithResponse(int duration, RequestConditions * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseClient.renewLease} + * + *
+     * System.out.printf("Renewed lease ID is %s%n", client.renewLease());
+     * 
+ * * * @return The renewed lease ID. */ @@ -127,7 +162,16 @@ public String renewLease() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseClient.renewLeaseWithResponse#RequestConditions-Duration-Context} + * + *
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * System.out.printf("Renewed lease ID is %s%n",
+     *     client.renewLeaseWithResponse(modifiedRequestConditions, timeout, new Context(key, value))
+     *         .getValue());
+     * 
+ * * * @param modifiedRequestConditions Standard HTTP Access conditions related to the modification of data. ETag and * LastModifiedTime are used to construct conditions related to when the resource was changed relative to the given @@ -148,7 +192,12 @@ public Response renewLeaseWithResponse(RequestConditions modifiedRequest * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseClient.releaseLease} + * + *
+     * client.releaseLease();
+     * System.out.println("Release lease completed");
+     * 
+ * */ @ServiceMethod(returns = ReturnType.SINGLE) public void releaseLease() { @@ -160,7 +209,16 @@ public void releaseLease() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseClient.releaseLeaseWithResponse#RequestConditions-Duration-Context} + * + *
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * System.out.printf("Release lease completed with status %d%n",
+     *     client.releaseLeaseWithResponse(modifiedRequestConditions, timeout, new Context(key, value))
+     *         .getStatusCode());
+     * 
+ * * * @param modifiedRequestConditions Standard HTTP Access conditions related to the modification of data. ETag and * LastModifiedTime are used to construct conditions related to when the resource was changed relative to the given @@ -181,7 +239,11 @@ public Response releaseLeaseWithResponse(RequestConditions modifiedRequest * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseClient.breakLease} + * + *
+     * System.out.printf("The broken lease has %d seconds remaining on the lease", client.breakLease());
+     * 
+ * * * @return The remaining time in the broken lease in seconds. */ @@ -198,7 +260,17 @@ public Integer breakLease() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseClient.breakLeaseWithResponse#Integer-RequestConditions-Duration-Context} + * + *
+     * Integer retainLeaseInSeconds = 5;
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * System.out.printf("The broken lease has %d seconds remaining on the lease", client
+     *     .breakLeaseWithResponse(retainLeaseInSeconds, modifiedRequestConditions, timeout, new Context(key, value))
+     *     .getValue());
+     * 
+ * * * @param breakPeriodInSeconds An optional duration, between 0 and 60 seconds, that the lease should continue before * it is broken. If the break period is longer than the time remaining on the lease the remaining time on the lease @@ -224,7 +296,11 @@ public Response breakLeaseWithResponse(Integer breakPeriodInSeconds, * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseClient.changeLease#String} + * + *
+     * System.out.printf("Changed lease ID is %s%n", client.changeLease("proposedId"));
+     * 
+ * * * @param proposedId A new lease ID in a valid GUID format. * @return The new lease ID. @@ -239,7 +315,16 @@ public String changeLease(String proposedId) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseClient.changeLeaseWithResponse#String-RequestConditions-Duration-Context} + * + *
+     * RequestConditions modifiedRequestConditions = new RequestConditions()
+     *     .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(3));
+     *
+     * System.out.printf("Changed lease ID is %s%n",
+     *     client.changeLeaseWithResponse("proposedId", modifiedRequestConditions, timeout, new Context(key, value))
+     *         .getValue());
+     * 
+ * * * @param proposedId A new lease ID in a valid GUID format. * @param modifiedRequestConditions Standard HTTP Access conditions related to the modification of data. ETag and diff --git a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/specialized/DataLakeLeaseClientBuilder.java b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/specialized/DataLakeLeaseClientBuilder.java index d618dbf9674af..25e32de49032b 100644 --- a/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/specialized/DataLakeLeaseClientBuilder.java +++ b/sdk/storage/azure-storage-file-datalake/src/main/java/com/azure/storage/file/datalake/specialized/DataLakeLeaseClientBuilder.java @@ -34,19 +34,61 @@ * *

Instantiating LeaseClients

* - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseClientBuilder.syncInstantiationWithFileAndLeaseId} + * + *
+ * DataLakeLeaseClient dataLakeLeaseClient = new DataLakeLeaseClientBuilder()
+ *     .fileClient(fileClient)
+ *     .leaseId(leaseId)
+ *     .buildClient();
+ * 
+ * * - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseClientBuilder.syncInstantiationWithDirectoryAndLeaseId} + * + *
+ * DataLakeLeaseClient dataLakeLeaseClient = new DataLakeLeaseClientBuilder()
+ *     .directoryClient(directoryClient)
+ *     .leaseId(leaseId)
+ *     .buildClient();
+ * 
+ * * - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseClientBuilder.syncInstantiationWithFileSystemAndLeaseId} + * + *
+ * DataLakeLeaseClient dataLakeLeaseClient = new DataLakeLeaseClientBuilder()
+ *     .fileSystemClient(dataLakeFileSystemClient)
+ *     .leaseId(leaseId)
+ *     .buildClient();
+ * 
+ * * *

Instantiating LeaseAsyncClients

* - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseClientBuilder.asyncInstantiationWithFileAndLeaseId} + * + *
+ * DataLakeLeaseAsyncClient dataLakeLeaseAsyncClient = new DataLakeLeaseClientBuilder()
+ *     .fileAsyncClient(fileAsyncClient)
+ *     .leaseId(leaseId)
+ *     .buildAsyncClient();
+ * 
+ * * - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseClientBuilder.asyncInstantiationWithDirectoryAndLeaseId} + * + *
+ * DataLakeLeaseAsyncClient dataLakeLeaseAsyncClient = new DataLakeLeaseClientBuilder()
+ *     .directoryAsyncClient(directoryAsyncClient)
+ *     .leaseId(leaseId)
+ *     .buildAsyncClient();
+ * 
+ * * - * {@codesnippet com.azure.storage.file.datalake.specialized.DataLakeLeaseClientBuilder.asyncInstantiationWithFileSystemAndLeaseId} + * + *
+ * DataLakeLeaseAsyncClient dataLakeLeaseAsyncClient = new DataLakeLeaseClientBuilder()
+ *     .fileSystemAsyncClient(dataLakeFileSystemAsyncClient)
+ *     .leaseId(leaseId)
+ *     .buildAsyncClient();
+ * 
+ * * * @see DataLakeLeaseClient * @see DataLakeLeaseAsyncClient diff --git a/sdk/storage/azure-storage-file-datalake/src/samples/java/com/azure/storage/file/datalake/ReadmeSamples.java b/sdk/storage/azure-storage-file-datalake/src/samples/java/com/azure/storage/file/datalake/ReadmeSamples.java index d8884272097ec..54f991174b0cf 100644 --- a/sdk/storage/azure-storage-file-datalake/src/samples/java/com/azure/storage/file/datalake/ReadmeSamples.java +++ b/sdk/storage/azure-storage-file-datalake/src/samples/java/com/azure/storage/file/datalake/ReadmeSamples.java @@ -16,129 +16,169 @@ */ public class ReadmeSamples { - private DataLakeServiceClient dataLakeServiceClient = new DataLakeServiceClientBuilder().buildClient(); - private DataLakeFileSystemClient dataLakeFileSystemClient = new DataLakeFileSystemClientBuilder().buildClient(); - private DataLakeFileClient dataLakeFileClient = new DataLakePathClientBuilder().buildFileClient(); - private DataLakeDirectoryClient dataLakeDirectoryClient = new DataLakePathClientBuilder().buildDirectoryClient(); + private final DataLakeServiceClient dataLakeServiceClient = new DataLakeServiceClientBuilder().buildClient(); + private final DataLakeFileSystemClient dataLakeFileSystemClient = new DataLakeFileSystemClientBuilder() + .buildClient(); + private final DataLakeFileClient dataLakeFileClient = new DataLakePathClientBuilder().buildFileClient(); + private final DataLakeDirectoryClient dataLakeDirectoryClient = new DataLakePathClientBuilder() + .buildDirectoryClient(); public void getDataLakeServiceClient1() { + // BEGIN: readme-sample-getDataLakeServiceClient1 DataLakeServiceClient dataLakeServiceClient = new DataLakeServiceClientBuilder() .endpoint("") .sasToken("") .buildClient(); + // END: readme-sample-getDataLakeServiceClient1 } public void getDataLakeServiceClient2() { + // BEGIN: readme-sample-getDataLakeServiceClient2 // Only one "?" is needed here. If the sastoken starts with "?", please removing one "?". DataLakeServiceClient dataLakeServiceClient = new DataLakeServiceClientBuilder() .endpoint("" + "?" + "") .buildClient(); + // END: readme-sample-getDataLakeServiceClient2 } public void getDataLakeFileSystemClient1() { + // BEGIN: readme-sample-getDataLakeFileSystemClient1 DataLakeFileSystemClient dataLakeFileSystemClient = dataLakeServiceClient.getFileSystemClient("myfilesystem"); + // END: readme-sample-getDataLakeFileSystemClient1 } public void getDataLakeFileSystemClient2() { + // BEGIN: readme-sample-getDataLakeFileSystemClient2 DataLakeFileSystemClient dataLakeFileSystemClient = new DataLakeFileSystemClientBuilder() .endpoint("") .sasToken("") .fileSystemName("myfilesystem") .buildClient(); + // END: readme-sample-getDataLakeFileSystemClient2 } public void getDataLakeFileSystemClient3() { + // BEGIN: readme-sample-getDataLakeFileSystemClient3 // Only one "?" is needed here. If the sastoken starts with "?", please removing one "?". DataLakeFileSystemClient dataLakeFileSystemClient = new DataLakeFileSystemClientBuilder() .endpoint("" + "/" + "myfilesystem" + "?" 
+ "") .buildClient(); + // END: readme-sample-getDataLakeFileSystemClient3 } public void getFileClient1() { + // BEGIN: readme-sample-getFileClient1 DataLakeFileClient fileClient = dataLakeFileSystemClient.getFileClient("myfile"); + // END: readme-sample-getFileClient1 } public void getFileClient2() { + // BEGIN: readme-sample-getFileClient2 DataLakeFileClient fileClient = new DataLakePathClientBuilder() .endpoint("") .sasToken("") .fileSystemName("myfilesystem") .pathName("myfile") .buildFileClient(); + // END: readme-sample-getFileClient2 } public void getFileClient3() { + // BEGIN: readme-sample-getFileClient3 // Only one "?" is needed here. If the sastoken starts with "?", please removing one "?". DataLakeFileClient fileClient = new DataLakePathClientBuilder() .endpoint("" + "/" + "myfilesystem" + "/" + "myfile" + "?" + "") .buildFileClient(); + // END: readme-sample-getFileClient3 } public void getDirClient1() { + // BEGIN: readme-sample-getDirClient1 DataLakeDirectoryClient directoryClient = dataLakeFileSystemClient.getDirectoryClient("mydir"); + // END: readme-sample-getDirClient1 } public void getDirClient2() { + // BEGIN: readme-sample-getDirClient2 DataLakeDirectoryClient directoryClient = new DataLakePathClientBuilder() .endpoint("") .sasToken("") .fileSystemName("myfilesystem") .pathName("mydir") .buildDirectoryClient(); + // END: readme-sample-getDirClient2 } public void getDirClient3() { + // BEGIN: readme-sample-getDirClient3 // Only one "?" is needed here. If the sastoken starts with "?", please removing one "?". DataLakeDirectoryClient directoryClient = new DataLakePathClientBuilder() .endpoint("" + "/" + "myfilesystem" + "/" + "mydir" + "?" + "") .buildDirectoryClient(); + // END: readme-sample-getDirClient3 } public void createDataLakeFileSystemClient1() { + // BEGIN: readme-sample-createDataLakeFileSystemClient1 dataLakeServiceClient.createFileSystem("myfilesystem"); + // END: readme-sample-createDataLakeFileSystemClient1 } public void createDataLakeFileSystemClient2() { + // BEGIN: readme-sample-createDataLakeFileSystemClient2 dataLakeFileSystemClient.create(); + // END: readme-sample-createDataLakeFileSystemClient2 } public void enumeratePaths() { + // BEGIN: readme-sample-enumeratePaths for (PathItem pathItem : dataLakeFileSystemClient.listPaths()) { System.out.println("This is the path name: " + pathItem.getName()); } + // END: readme-sample-enumeratePaths } public void renameFile() { + // BEGIN: readme-sample-renameFile //Need to authenticate with azure identity and add role assignment "Storage Blob Data Contributor" to do the following operation. DataLakeFileClient fileClient = dataLakeFileSystemClient.getFileClient("myfile"); fileClient.create(); fileClient.rename("new-file-system-name", "new-file-name"); + // END: readme-sample-renameFile } public void renameDirectory() { + // BEGIN: readme-sample-renameDirectory //Need to authenticate with azure identity and add role assignment "Storage Blob Data Contributor" to do the following operation. 
DataLakeDirectoryClient directoryClient = dataLakeFileSystemClient.getDirectoryClient("mydir"); directoryClient.create(); directoryClient.rename("new-file-system-name", "new-directory-name"); + // END: readme-sample-renameDirectory } public void getPropertiesFile() { + // BEGIN: readme-sample-getPropertiesFile DataLakeFileClient fileClient = dataLakeFileSystemClient.getFileClient("myfile"); fileClient.create(); PathProperties properties = fileClient.getProperties(); + // END: readme-sample-getPropertiesFile } public void getPropertiesDirectory() { + // BEGIN: readme-sample-getPropertiesDirectory DataLakeDirectoryClient directoryClient = dataLakeFileSystemClient.getDirectoryClient("mydir"); directoryClient.create(); PathProperties properties = directoryClient.getProperties(); + // END: readme-sample-getPropertiesDirectory } public void authWithIdentity() { + // BEGIN: readme-sample-authWithIdentity DataLakeServiceClient storageClient = new DataLakeServiceClientBuilder() .endpoint("") .credential(new DefaultAzureCredentialBuilder().build()) .buildClient(); + // END: readme-sample-authWithIdentity } } diff --git a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareAsyncClient.java b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareAsyncClient.java index 669279232c3dd..5ef44e24b0c64 100644 --- a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareAsyncClient.java +++ b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareAsyncClient.java @@ -70,7 +70,14 @@ * *

Instantiating an Asynchronous Share Client

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.instantiation} + * + *
+ * ShareAsyncClient client = new ShareClientBuilder()
+ *     .connectionString("${connectionString}")
+ *     .endpoint("${endpoint}")
+ *     .buildAsyncClient();
+ * 
+ * * *

View {@link ShareClientBuilder this} for additional ways to construct the client.

* @@ -196,7 +203,11 @@ public ShareAsyncClient getSnapshotClient(String snapshot) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.exists} + * + *
+     * client.exists().subscribe(response -> System.out.printf("Exists? %b%n", response));
+     * 
+ * * * @return Flag indicating existence of the share. */ @@ -210,7 +221,11 @@ public Mono exists() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.existsWithResponse} + * + *
+     * client.existsWithResponse().subscribe(response -> System.out.printf("Exists? %b%n", response.getValue()));
+     * 
+ * * * @return Flag indicating existence of the share. */ @@ -243,7 +258,16 @@ Mono> existsWithResponse(Context context) { * *

Create the share

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.create} + * + *
+     * shareAsyncClient.create().subscribe(
+     *     response -> {
+     *     },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete creating the share!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -267,11 +291,27 @@ public Mono create() { * *

Create the share with metadata "share:metadata"

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.createWithResponse#map-integer.metadata} + * + *
+     * shareAsyncClient.createWithResponse(Collections.singletonMap("share", "metadata"), null).subscribe(
+     *     response -> System.out.printf("Creating the share completed with status code %d", response.getStatusCode()),
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete creating the share!")
+     * );
+     * 
+ * * *

Create the share with a quota of 10 GB

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.createWithResponse#map-integer.quota} + * + *
+     * shareAsyncClient.createWithResponse(null, 10).subscribe(
+     *     response -> System.out.printf("Creating the share completed with status code %d", response.getStatusCode()),
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete creating the share!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -300,7 +340,18 @@ public Mono> createWithResponse(Map metadata * *

Create the share with optional parameters

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.createWithResponse#ShareCreateOptions} + * + *
+     * shareAsyncClient.createWithResponse(new ShareCreateOptions()
+     *     .setMetadata(Collections.singletonMap("share", "metadata")).setQuotaInGb(1)
+     *     .setAccessTier(ShareAccessTier.HOT)).subscribe(
+     *         response -> System.out.printf("Creating the share completed with status code %d",
+     *             response.getStatusCode()),
+     *         error -> System.err.print(error.toString()),
+     *         () -> System.out.println("Complete creating the share!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -338,7 +389,16 @@ Mono> createWithResponse(ShareCreateOptions options, Context * *

Create a snapshot

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.createSnapshot} + * + *
+     * shareAsyncClient.createSnapshot().subscribe(
+     *     response -> System.out.println("Successfully creating the share snapshot with snapshot id: "
+     *         + response.getSnapshot()),
+     *     error -> System.err.println(error.toString()),
+     *     () -> System.out.println("Complete creating the share snapshot.")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -363,7 +423,16 @@ public Mono createSnapshot() { * *

Create a snapshot with metadata "snapshot:metadata"

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.createSnapshotWithResponse#map} + * + *
+     * shareAsyncClient.createSnapshotWithResponse(Collections.singletonMap("snapshot", "metadata")).subscribe(
+     *     response -> System.out.println("Successfully creating the share snapshot with snapshot id: "
+     *         + response.getValue().getSnapshot()),
+     *     error -> System.err.println(error.toString()),
+     *     () -> System.out.println("Complete creating the share snapshot.")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -397,7 +466,15 @@ Mono> createSnapshotWithResponse(Map * *

Delete the share

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.delete} + * + *
+     * shareAsyncClient.delete().subscribe(
+     *     response -> System.out.println("Deleting the shareAsyncClient completed."),
+     *     error -> System.err.println(error.toString()),
+     *     () -> System.out.println("Complete deleting the share.")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -421,7 +498,16 @@ public Mono delete() { * *

Delete the share

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.deleteWithResponse} + * + *
+     * shareAsyncClient.deleteWithResponse().subscribe(
+     *     response -> System.out.println("Deleting the shareAsyncClient completed with status code: "
+     *         + response.getStatusCode()),
+     *     error -> System.err.println(error.toString()),
+     *     () -> System.out.println("Complete deleting the share.")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -445,7 +531,16 @@ public Mono> deleteWithResponse() { * *

Delete the share

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.deleteWithResponse#ShareDeleteOptions} + * + *
+     * shareAsyncClient.deleteWithResponse(new ShareDeleteOptions()
+     *     .setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId))).subscribe(
+     *         response -> System.out.println("Deleting the shareAsyncClient completed with status code: "
+     *             + response.getStatusCode()), error -> System.err.println(error.toString()),
+     *         () -> System.out.println("Complete deleting the share.")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -484,7 +579,14 @@ Mono> deleteWithResponse(ShareDeleteOptions options, Context cont * *

Retrieve the share properties

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.getProperties} + * + *
+     * shareAsyncClient.getProperties()
+     *     .subscribe(properties -> {
+     *         System.out.printf("Share quota: %d, Metadata: %s", properties.getQuota(), properties.getMetadata());
+     *     });
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -509,7 +611,15 @@ public Mono getProperties() { * *

Retrieve the share properties

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.getPropertiesWithResponse} + * + *
+     * shareAsyncClient.getPropertiesWithResponse()
+     *     .subscribe(properties -> {
+     *         System.out.printf("Share quota: %d, Metadata: %s", properties.getValue().getQuota(),
+     *             properties.getValue().getMetadata());
+     *     });
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -535,7 +645,16 @@ public Mono> getPropertiesWithResponse() { * *

Retrieve the share properties

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.getPropertiesWithResponse#ShareGetPropertiesOptions} + * + *
+     * shareAsyncClient.getPropertiesWithResponse(new ShareGetPropertiesOptions()
+     *     .setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId)))
+     *     .subscribe(properties -> {
+     *         System.out.printf("Share quota: %d, Metadata: %s", properties.getValue().getQuota(),
+     *             properties.getValue().getMetadata());
+     *     });
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -572,7 +691,13 @@ Mono> getPropertiesWithResponse(ShareGetPropertiesOpti * *

Set the quota to 1024 GB

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.setQuota#int} + * + *
+     * shareAsyncClient.setQuota(1024).doOnSuccess(response ->
+     *     System.out.println("Setting the share quota completed.")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -599,7 +724,14 @@ public Mono setQuota(int quotaInGB) { * *

Set the quota to 1024 GB

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.setQuotaWithResponse#int} + * + *
+     * shareAsyncClient.setQuotaWithResponse(1024)
+     *     .subscribe(response ->
+     *         System.out.printf("Setting the share quota completed with status code %d", response.getStatusCode())
+     *     );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -625,7 +757,13 @@ public Mono> setQuotaWithResponse(int quotaInGB) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.setProperties#ShareSetPropertiesOptions} + * + *
+     * shareAsyncClient.setProperties(new ShareSetPropertiesOptions().setAccessTier(ShareAccessTier.HOT)
+     *     .setQuotaInGb(2014))
+     *     .doOnSuccess(response -> System.out.println("Setting the share access tier completed."));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -647,7 +785,15 @@ public Mono setProperties(ShareSetPropertiesOptions options) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.setPropertiesWithResponse#ShareSetPropertiesOptions} + * + *
+     * shareAsyncClient.setPropertiesWithResponse(new ShareSetPropertiesOptions().setAccessTier(ShareAccessTier.HOT)
+     *     .setQuotaInGb(1024).setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId)))
+     *     .subscribe(response ->
+     *         System.out.printf("Setting the share quota completed with status code %d", response.getStatusCode())
+     *     );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -685,11 +831,23 @@ Mono> setPropertiesWithResponse(ShareSetPropertiesOptions op * *

Set the metadata to "share:updatedMetadata"

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.setMetadata#map} + * + *
+     * shareAsyncClient.setMetadata(Collections.singletonMap("share", "updatedMetadata")).doOnSuccess(response ->
+     *     System.out.println("Setting the share metadata completed.")
+     * );
+     * 
+ * * *

Clear the metadata of the share

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.clearMetadata#map} + * + *
+     * shareAsyncClient.setMetadata(null).doOnSuccess(response ->
+     *     System.out.println("Setting the share metadata completed.")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -716,11 +874,23 @@ public Mono setMetadata(Map metadata) { * *

Set the metadata to "share:updatedMetadata"

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.setMetadata#map} + * + *
+     * shareAsyncClient.setMetadata(Collections.singletonMap("share", "updatedMetadata")).doOnSuccess(response ->
+     *     System.out.println("Setting the share metadata completed.")
+     * );
+     * 
+ * * *

Clear the metadata of the share

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.clearMetadata#map} + * + *
+     * shareAsyncClient.setMetadata(null).doOnSuccess(response ->
+     *     System.out.println("Setting the share metadata completed.")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -746,7 +916,16 @@ public Mono> setMetadataWithResponse(Map met * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.setMetadataWithResponse#ShareSetMetadataOptions} + * + *
+     * shareAsyncClient.setMetadataWithResponse(new ShareSetMetadataOptions()
+     *     .setMetadata(Collections.singletonMap("share", "updatedMetadata"))
+     *     .setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId)))
+     *     .subscribe(response ->
+     *         System.out.printf("Setting the share metadata completed with status code %d", response.getStatusCode())
+     *     );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -783,7 +962,14 @@ Mono> setMetadataWithResponse(ShareSetMetadataOptions option * *

List the stored access policies

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.getAccessPolicy} + * + *
+     * shareAsyncClient.getAccessPolicy()
+     *     .subscribe(result -> System.out.printf("Access policy %s allows these permissions: %s", result.getId(),
+     *         result.getAccessPolicy().getPermissions())
+     *     );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -807,7 +993,15 @@ public PagedFlux getAccessPolicy() { * *

List the stored access policies

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.getAccessPolicy#ShareGetAccessPolicyOptions} + * + *
+     * shareAsyncClient.getAccessPolicy(new ShareGetAccessPolicyOptions()
+     *     .setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId)))
+     *     .subscribe(result -> System.out.printf("Access policy %s allows these permissions: %s", result.getId(),
+     *         result.getAccessPolicy().getPermissions())
+     *     );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -846,7 +1040,17 @@ public PagedFlux getAccessPolicy(ShareGetAccessPolicyOpti * *

Set a read only stored access policy

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.setAccessPolicy#List} + * + *
+     * ShareAccessPolicy accessPolicy = new ShareAccessPolicy().setPermissions("r")
+     *     .setStartsOn(OffsetDateTime.now(ZoneOffset.UTC))
+     *     .setExpiresOn(OffsetDateTime.now(ZoneOffset.UTC).plusDays(10));
+     *
+     * ShareSignedIdentifier permission = new ShareSignedIdentifier().setId("mypolicy").setAccessPolicy(accessPolicy);
+     * shareAsyncClient.setAccessPolicy(Collections.singletonList(permission)).doOnSuccess(
+     *     response -> System.out.println("Setting access policies completed."));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -872,7 +1076,18 @@ public Mono setAccessPolicy(List permissions) * *

Set a read only stored access policy

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.setAccessPolicyWithResponse#List} + * + *
+     * ShareAccessPolicy accessPolicy = new ShareAccessPolicy().setPermissions("r")
+     *     .setStartsOn(OffsetDateTime.now(ZoneOffset.UTC))
+     *     .setExpiresOn(OffsetDateTime.now(ZoneOffset.UTC).plusDays(10));
+     *
+     * ShareSignedIdentifier permission = new ShareSignedIdentifier().setId("mypolicy").setAccessPolicy(accessPolicy);
+     * shareAsyncClient.setAccessPolicyWithResponse(Collections.singletonList(permission))
+     *     .subscribe(response -> System.out.printf("Setting access policies completed completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -900,7 +1115,20 @@ public Mono> setAccessPolicyWithResponse(ListSet a read only stored access policy

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.setAccessPolicyWithResponse#ShareSetAccessPolicyOptions} + * + *
+     * ShareAccessPolicy accessPolicy = new ShareAccessPolicy().setPermissions("r")
+     *     .setStartsOn(OffsetDateTime.now(ZoneOffset.UTC))
+     *     .setExpiresOn(OffsetDateTime.now(ZoneOffset.UTC).plusDays(10));
+     *
+     * ShareSignedIdentifier permission = new ShareSignedIdentifier().setId("mypolicy").setAccessPolicy(accessPolicy);
+     * shareAsyncClient.setAccessPolicyWithResponse(new ShareSetAccessPolicyOptions()
+     *     .setPermissions(Collections.singletonList(permission))
+     *     .setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId)))
+     *     .subscribe(response -> System.out.printf("Setting access policies completed completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -958,7 +1186,12 @@ OffsetDateTime.now will only give back milliseconds (more precise fields are zer * *
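A hedged sketch of one way to work with the precision caveat noted above: truncate the policy times to whole seconds before building the signed identifier (java.time.temporal.ChronoUnit is assumed to be imported).

    // Illustrative only: second-precision start and expiry times for a stored access policy.
    OffsetDateTime startsOn = OffsetDateTime.now(ZoneOffset.UTC).truncatedTo(ChronoUnit.SECONDS);
    ShareAccessPolicy trimmedPolicy = new ShareAccessPolicy()
        .setPermissions("r")
        .setStartsOn(startsOn)
        .setExpiresOn(startsOn.plusDays(10));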

Retrieve the storage statistics

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.getStatistics} + * + *
+     * shareAsyncClient.getStatistics().doOnSuccess(response -> System.out.printf("The share is using %d GB",
+     *     response.getShareUsageInGB()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -981,7 +1214,12 @@ public Mono getStatistics() { * *

Retrieve the storage statistics

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.getStatisticsWithResponse} + * + *
+     * shareAsyncClient.getStatisticsWithResponse().subscribe(response -> System.out.printf("The share is using %d GB",
+     *     response.getValue().getShareUsageInGB()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1005,7 +1243,14 @@ public Mono> getStatisticsWithResponse() { * *

Retrieve the storage statistics

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.getStatisticsWithResponse#ShareGetStatisticsOptions} + * + *
+     * shareAsyncClient.getStatisticsWithResponse(new ShareGetStatisticsOptions()
+     *     .setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId)))
+     *     .subscribe(response -> System.out.printf("The share is using %d GB",
+     *     response.getValue().getShareUsageInGB()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1040,7 +1285,16 @@ Mono> getStatisticsWithResponse(ShareGetStatisticsOpti * *

Create the directory "mydirectory"

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.createDirectory#string} + * + *
+     * shareAsyncClient.createDirectory("mydirectory").subscribe(
+     *     response -> {
+     *     },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete creating the directory!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1066,7 +1320,16 @@ public Mono createDirectory(String directoryName) { * *

Create the directory "documents" with metadata "directory:metadata"

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.createDirectoryWithResponse#String-FileSmbProperties-String-Map} + * + *
+     * FileSmbProperties smbProperties = new FileSmbProperties();
+     * String filePermission = "filePermission";
+     * shareAsyncClient.createDirectoryWithResponse("documents", smbProperties, filePermission,
+     *     Collections.singletonMap("directory", "metadata"))
+     *     .subscribe(response -> System.out.printf("Creating the directory completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1106,7 +1369,16 @@ Mono> createDirectoryWithResponse(String dir * *

Create the file "myfile" with size of 1024 bytes.

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.createFile#string-long} + * + *
+     * shareAsyncClient.createFile("myfile", 1024).subscribe(
+     *     response -> {
+     *     },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete creating the directory!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1140,7 +1412,27 @@ public Mono createFile(String fileName, long maxSize) { * *

Create the file "myfile" with length of 1024 bytes, some headers, file smb properties and metadata

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.createFileWithResponse#String-long-ShareFileHttpHeaders-FileSmbProperties-String-Map} + * + *
+     * ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders()
+     *     .setContentType("text/html")
+     *     .setContentEncoding("gzip")
+     *     .setContentLanguage("en")
+     *     .setCacheControl("no-transform")
+     *     .setContentDisposition("attachment");
+     * FileSmbProperties smbProperties = new FileSmbProperties()
+     *     .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY))
+     *     .setFileCreationTime(OffsetDateTime.now())
+     *     .setFileLastWriteTime(OffsetDateTime.now())
+     *     .setFilePermissionKey("filePermissionKey");
+     * String filePermission = "filePermission";
+     * // NOTE: filePermission and filePermissionKey should never be both set
+     * shareAsyncClient.createFileWithResponse("myfile", 1024, httpHeaders, smbProperties,
+     *     filePermission, Collections.singletonMap("directory", "metadata"))
+     *     .subscribe(response -> System.out.printf("Creating the file completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1178,7 +1470,30 @@ public Mono> createFileWithResponse(String fileNa * *

Create the file "myfile" with length of 1024 bytes, some headers, file smb properties and metadata

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.createFileWithResponse#String-long-ShareFileHttpHeaders-FileSmbProperties-String-Map-ShareRequestConditions} + * + *
+     * ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders()
+     *     .setContentType("text/html")
+     *     .setContentEncoding("gzip")
+     *     .setContentLanguage("en")
+     *     .setCacheControl("no-transform")
+     *     .setContentDisposition("attachment");
+     * FileSmbProperties smbProperties = new FileSmbProperties()
+     *     .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY))
+     *     .setFileCreationTime(OffsetDateTime.now())
+     *     .setFileLastWriteTime(OffsetDateTime.now())
+     *     .setFilePermissionKey("filePermissionKey");
+     * String filePermission = "filePermission";
+     * // NOTE: filePermission and filePermissionKey should never be both set
+     *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     *
+     * shareAsyncClient.createFileWithResponse("myfile", 1024, httpHeaders, smbProperties,
+     *     filePermission, Collections.singletonMap("directory", "metadata"), requestConditions)
+     *     .subscribe(response -> System.out.printf("Creating the file completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1231,7 +1546,16 @@ Mono> createFileWithResponse(String fileName, lon * *

Delete the directory "mydirectory"

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.deleteDirectory#string} + * + *
+     * shareAsyncClient.deleteDirectory("mydirectory").subscribe(
+     *     response -> {
+     *     },
+     *     error -> System.err.println(error.toString()),
+     *     () -> System.out.println("Complete deleting the directory.")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1256,7 +1580,16 @@ public Mono deleteDirectory(String directoryName) { * *

Delete the directory "mydirectory"

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.deleteDirectory#string} + * + *
+     * shareAsyncClient.deleteDirectory("mydirectory").subscribe(
+     *     response -> {
+     *     },
+     *     error -> System.err.println(error.toString()),
+     *     () -> System.out.println("Complete deleting the directory.")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1285,7 +1618,16 @@ Mono> deleteDirectoryWithResponse(String directoryName, Context c * *

Delete the file "myfile"

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.deleteFile#string} + * + *
+     * shareAsyncClient.deleteFile("myfile").subscribe(
+     *     response -> {
+     *     },
+     *     error -> System.err.println(error.toString()),
+     *     () -> System.out.println("Complete deleting the file.")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1310,7 +1652,16 @@ public Mono deleteFile(String fileName) { * *

Delete the file "myfile"

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.deleteFile#string} + * + *
+     * shareAsyncClient.deleteFile("myfile").subscribe(
+     *     response -> {
+     *     },
+     *     error -> System.err.println(error.toString()),
+     *     () -> System.out.println("Complete deleting the file.")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1331,7 +1682,17 @@ public Mono> deleteFileWithResponse(String fileName) { * *

Delete the file "myfile"

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.deleteFile#string-ShareRequestConditions} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * shareAsyncClient.deleteFileWithResponse("myfile", requestConditions).subscribe(
+     *     response -> {
+     *     },
+     *     error -> System.err.println(error.toString()),
+     *     () -> System.out.println("Complete deleting the file.")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1362,7 +1723,12 @@ Mono> deleteFileWithResponse(String fileName, ShareRequestConditi * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.createPermission#string} + * + *
+     * shareAsyncClient.createPermission("filePermission").subscribe(
+     *     response -> System.out.printf("The file permission key is %s", response));
+     * 
+ * * * @param filePermission The file permission to get/create. * @return The file permission key associated with the file permission. @@ -1382,7 +1748,12 @@ public Mono createPermission(String filePermission) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.createPermissionWithResponse#string} + * + *
+     * shareAsyncClient.createPermissionWithResponse("filePermission").subscribe(
+     *     response -> System.out.printf("The file permission key is %s", response.getValue()));
+     * 
+ * * * @param filePermission The file permission to get/create. * @return A response that contains the file permission key associated with the file permission. @@ -1410,7 +1781,12 @@ Mono> createPermissionWithResponse(String filePermission, Conte * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.getPermission#string} + * + *
+     * shareAsyncClient.getPermission("filePermissionKey").subscribe(
+     *     response -> System.out.printf("The file permission is %s", response));
+     * 
+ * * * @param filePermissionKey The file permission key. * @return The file permission associated with the file permission key. @@ -1429,7 +1805,12 @@ public Mono getPermission(String filePermissionKey) { * *
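To make the relationship between the permission methods concrete, a speculative round-trip sketch (not part of this patch): the key returned by createPermission is exactly what getPermission expects back.

    shareAsyncClient.createPermission("filePermission")
        .flatMap(filePermissionKey -> shareAsyncClient.getPermission(filePermissionKey))
        .subscribe(filePermission -> System.out.printf("Round-tripped file permission: %s%n", filePermission));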

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.getPermissionWithResponse#string} + * + *
+     * shareAsyncClient.getPermissionWithResponse("filePermissionKey").subscribe(
+     *     response -> System.out.printf("The file permission is %s", response.getValue()));
+     * 
+ * * * @param filePermissionKey The file permission key. * @return A response that contains the file permission associated with the file permission key. @@ -1456,7 +1837,19 @@ Mono> getPermissionWithResponse(String filePermissionKey, Conte * *

Get the share snapshot id.

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.getSnapshotId} + * + *
+     * OffsetDateTime currentTime = OffsetDateTime.of(LocalDateTime.now(), ZoneOffset.UTC);
+     * ShareAsyncClient shareAsyncClient = new ShareClientBuilder()
+     *     .endpoint("https://${accountName}.file.core.windows.net")
+     *     .sasToken("${SASToken}")
+     *     .shareName("myshare")
+     *     .snapshot(currentTime.toString())
+     *     .buildAsyncClient();
+     *
+     * System.out.printf("Snapshot ID: %s%n", shareAsyncClient.getSnapshotId());
+     * 
+ * * * @return The snapshot id which is a unique {@code DateTime} value that identifies the share snapshot to its base * share. @@ -1470,7 +1863,12 @@ public String getSnapshotId() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.getShareName} + * + *
+     * String shareName = shareAsyncClient.getShareName();
+     * System.out.println("The name of the share is " + shareName);
+     * 
+ * * * @return The name of the share. */ @@ -1503,7 +1901,17 @@ public HttpPipeline getHttpPipeline() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.generateSas#ShareServiceSasSignatureValues} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * ShareSasPermission permission = new ShareSasPermission().setReadPermission(true);
+     *
+     * ShareServiceSasSignatureValues values = new ShareServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * shareAsyncClient.generateSas(values); // Client must be authenticated via StorageSharedKeyCredential
+     * 
+ * * * @param shareServiceSasSignatureValues {@link ShareServiceSasSignatureValues} * @@ -1520,7 +1928,18 @@ public String generateSas(ShareServiceSasSignatureValues shareServiceSasSignatur * *
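A hedged follow-up sketch (editorial, reusing the `values` variable from the sample above) showing how such a SAS is commonly consumed: hand it to a new client that carries no other credential; the endpoint placeholder is an assumption in the style of the other samples.

    String sas = shareAsyncClient.generateSas(values);
    ShareAsyncClient sasScopedClient = new ShareClientBuilder()
        .endpoint("https://${accountName}.file.core.windows.net")
        .shareName(shareAsyncClient.getShareName())
        .sasToken(sas)
        .buildAsyncClient();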

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.generateSas#ShareServiceSasSignatureValues-Context} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * ShareSasPermission permission = new ShareSasPermission().setReadPermission(true);
+     *
+     * ShareServiceSasSignatureValues values = new ShareServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * shareAsyncClient.generateSas(values, new Context("key", "value"));
+     * 
+ * * * @param shareServiceSasSignatureValues {@link ShareServiceSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. diff --git a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareClient.java b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareClient.java index b8294ebfdae77..6f7a1139a962a 100644 --- a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareClient.java +++ b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareClient.java @@ -45,7 +45,14 @@ * *

Instantiating a Synchronous Share Client

* - * {@codesnippet com.azure.storage.file.share.ShareClient.instantiation} + * + *
+ * ShareClient client = new ShareClientBuilder()
+ *     .connectionString("${connectionString}")
+ *     .endpoint("${endpoint}")
+ *     .buildClient();
+ * 
+ * * *

View {@link ShareClientBuilder this} for additional ways to construct the client.

* @@ -142,7 +149,11 @@ public ShareClient getSnapshotClient(String snapshot) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareClient.exists} + * + *
+     * System.out.printf("Exists? %b%n", client.exists());
+     * 
+ * * * @return Flag indicating existence of the share. */ @@ -156,7 +167,12 @@ public Boolean exists() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareClient.existsWithResponse#Duration-Context} + * + *
+     * Context context = new Context("Key", "Value");
+     * System.out.printf("Exists? %b%n", client.existsWithResponse(timeout, context).getValue());
+     * 
+ * * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. @@ -176,7 +192,12 @@ public Response existsWithResponse(Duration timeout, Context context) { * *

Create the share

* - * {@codesnippet com.azure.storage.file.share.ShareClient.create} + * + *
+     * ShareInfo response = shareClient.create();
+     * System.out.println("Complete creating the shares with status code: " + response);
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -196,11 +217,23 @@ public ShareInfo create() { * *

Create the share with metadata "share:metadata"

* - * {@codesnippet ShareClient.createWithResponse#map-integer-duration-context.metadata} + * + *
+     * Response<ShareInfo> response = shareClient.createWithResponse(Collections.singletonMap("share", "metadata"),
+     *     null, Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.println("Complete creating the shares with status code: " + response.getStatusCode());
+     * 
+ * * *

Create the share with a quota of 10 GB

* - * {@codesnippet ShareClient.createWithResponse#map-integer-duration-context.quota} + * + *
+     * Response<ShareInfo> response = shareClient.createWithResponse(null, 10,
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.println("Complete creating the shares with status code: " + response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -229,7 +262,14 @@ public Response createWithResponse(Map metadata, Inte * *

Code Samples

* - * {@codesnippet ShareClient.createWithResponse#ShareCreateOptions-Duration-Context} + * + *
+     * Response<ShareInfo> response = shareClient.createWithResponse(new ShareCreateOptions()
+     *         .setMetadata(Collections.singletonMap("share", "metadata")).setQuotaInGb(1)
+     *         .setAccessTier(ShareAccessTier.HOT), Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.println("Complete creating the shares with status code: " + response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -257,7 +297,12 @@ public Response createWithResponse(ShareCreateOptions options, Durati * *

Create a snapshot

* - * {@codesnippet com.azure.storage.file.share.ShareClient.createSnapshot} + * + *
+     * ShareSnapshotInfo response = shareClient.createSnapshot();
+     * System.out.println("Complete creating the share snpashot with snapshot id: " + response.getSnapshot());
+     * 
+ * * *
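
A brief follow-up sketch (illustrative only, reusing {@code response} from the sample above): the returned id can scope a client to that snapshot via {@link #getSnapshotClient(String)}.

+     * // Illustrative sketch: address the new snapshot through a snapshot-scoped client.
+     * ShareClient snapshotClient = shareClient.getSnapshotClient(response.getSnapshot());
+     * System.out.printf("Snapshot client targets snapshot %s%n", snapshotClient.getSnapshotId());
+     *
+ * *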

For more information, see the * Azure Docs.

@@ -278,7 +323,14 @@ public ShareSnapshotInfo createSnapshot() { * *

Create a snapshot with metadata "snapshot:metadata"

* - * {@codesnippet com.azure.storage.file.share.ShareClient.createSnapshotWithResponse#map-duration-context} + * + *
+     * Response<ShareSnapshotInfo> response =
+     *     shareClient.createSnapshotWithResponse(Collections.singletonMap("snpashot", "metadata"),
+     *         Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.println("Complete creating the share snpashot with snapshot id: " + response.getValue().getSnapshot());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -308,7 +360,12 @@ public Response createSnapshotWithResponse(MapDelete the share

* - * {@codesnippet com.azure.storage.file.share.ShareClient.delete} + * + *
+     * shareClient.delete();
+     * System.out.println("Completed deleting the share.");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -327,7 +384,12 @@ public void delete() { * *

Delete the share

* - * {@codesnippet com.azure.storage.file.share.ShareClient.deleteWithResponse#duration-context} + * + *
+     * Response<Void> response = shareClient.deleteWithResponse(Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.println("Complete deleting the share with status code: " + response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -351,7 +413,14 @@ public Response deleteWithResponse(Duration timeout, Context context) { * *

Delete the share

* - * {@codesnippet com.azure.storage.file.share.ShareClient.deleteWithResponse#ShareDeleteOptions-Duration-Context} + * + *
+     * Response<Void> response = shareClient.deleteWithResponse(new ShareDeleteOptions()
+     *         .setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId)),
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.println("Complete deleting the share with status code: " + response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -378,7 +447,12 @@ public Response deleteWithResponse(ShareDeleteOptions options, Duration ti * *

Retrieve the share properties

* - * {@codesnippet com.azure.storage.file.share.ShareClient.getProperties} + * + *
+     * ShareProperties properties = shareClient.getProperties();
+     * System.out.printf("Share quota: %d, Metadata: %s", properties.getQuota(), properties.getMetadata());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -399,7 +473,13 @@ public ShareProperties getProperties() { * *

Retrieve the share properties

* - * {@codesnippet com.azure.storage.file.share.ShareClient.getPropertiesWithResponse#duration-context} + * + *
+     * ShareProperties properties = shareClient.getPropertiesWithResponse(
+     *     Duration.ofSeconds(1), new Context(key1, value1)).getValue();
+     * System.out.printf("Share quota: %d, Metadata: %s", properties.getQuota(), properties.getMetadata());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -424,7 +504,14 @@ public Response getPropertiesWithResponse(Duration timeout, Con * *

Retrieve the share properties

* - * {@codesnippet com.azure.storage.file.share.ShareClient.getPropertiesWithResponse#ShareGetPropertiesOptions-Duration-Context} + * + *
+     * ShareProperties properties = shareClient.getPropertiesWithResponse(new ShareGetPropertiesOptions()
+     *     .setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId)),
+     *     Duration.ofSeconds(1), new Context(key1, value1)).getValue();
+     * System.out.printf("Share quota: %d, Metadata: %s", properties.getQuota(), properties.getMetadata());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -451,7 +538,11 @@ public Response getPropertiesWithResponse(ShareGetPropertiesOpt * *

Set the quota to 1024 GB

* - * {@codesnippet ShareClient.setQuota#int} + * + *
+     * System.out.println("Setting the share quota completed." + shareClient.setQuota(1024));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -474,7 +565,13 @@ public ShareInfo setQuota(int quotaInGB) { * *

Set the quota to 1024 GB

* - * {@codesnippet com.azure.storage.file.share.ShareClient.setQuotaWithResponse#int-duration-context} + * + *
+     * Response<ShareInfo> response = shareClient.setQuotaWithResponse(1024,
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Setting the share quota completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -499,7 +596,12 @@ public Response setQuotaWithResponse(int quotaInGB, Duration timeout, * *

Code Samples

* - * {@codesnippet ShareClient.setProperties#ShareSetPropertiesOptions} + * + *
+     * System.out.println("Setting the share access tier completed." + shareClient.setProperties(
+     *     new ShareSetPropertiesOptions().setAccessTier(ShareAccessTier.HOT).setQuotaInGb(1024)));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -517,7 +619,14 @@ public ShareInfo setProperties(ShareSetPropertiesOptions options) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareClient.setPropertiesWithResponse#ShareSetPropertiesOptions-Duration-Context} + * + *
+     * Response<ShareInfo> response = shareClient.setPropertiesWithResponse(
+     *     new ShareSetPropertiesOptions().setAccessTier(ShareAccessTier.HOT).setQuotaInGb(1024),
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Setting the share access tier completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -544,11 +653,21 @@ public Response setPropertiesWithResponse(ShareSetPropertiesOptions o * *

Set the metadata to "share:updatedMetadata"

* - * {@codesnippet com.azure.storage.file.share.ShareClient.setMetadata#map} + * + *
+     * shareClient.setMetadata(Collections.singletonMap("share", "updatedMetadata"));
+     * System.out.println("Setting the share metadata.");
+     * 
+ * * *

Clear the metadata of the share

* - * {@codesnippet com.azure.storage.file.share.ShareClient.clearMetadata#map} + * + *
+     * shareClient.setMetadata(null);
+     * System.out.println("Clear metadata completed.");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -571,7 +690,14 @@ public ShareInfo setMetadata(Map metadata) { * *

Set the metadata to "share:updatedMetadata"

* - * {@codesnippet com.azure.storage.file.share.ShareClient.setMetadataWithResponse#map-duration-context} + * + *
+     * Response<ShareInfo> response = shareClient.setMetadataWithResponse(
+     *     Collections.singletonMap("share", "updatedMetadata"), Duration.ofSeconds(1),
+     *     new Context(key1, value1));
+     * System.out.printf("Setting the share metadata completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -599,7 +725,16 @@ public Response setMetadataWithResponse(Map metadata, * *

Set the metadata to "share:updatedMetadata"

* - * {@codesnippet com.azure.storage.file.share.ShareClient.setMetadataWithResponse#ShareSetMetadataOptions-Duration-Context} + * + *
+     * Response<ShareInfo> response = shareClient.setMetadataWithResponse(new ShareSetMetadataOptions()
+     *         .setMetadata(Collections.singletonMap("share", "updatedMetadata"))
+     *         .setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId)),
+     *     Duration.ofSeconds(1),
+     *     new Context(key1, value1));
+     * System.out.printf("Setting the share metadata completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -626,7 +761,14 @@ public Response setMetadataWithResponse(ShareSetMetadataOptions optio * *

List the stored access policies

* - * {@codesnippet com.azure.storage.file.share.ShareClient.getAccessPolicy} + * + *
+     * for (ShareSignedIdentifier result : shareClient.getAccessPolicy()) {
+     *     System.out.printf("Access policy %s allows these permissions: %s",
+     *         result.getId(), result.getAccessPolicy().getPermissions());
+     * }
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -646,7 +788,16 @@ public PagedIterable getAccessPolicy() { * *

List the stored access policies

* - * {@codesnippet com.azure.storage.file.share.ShareClient.getAccessPolicy#ShareGetAccessPolicyOptions} + * + *
+     * for (ShareSignedIdentifier result : shareClient
+     *     .getAccessPolicy(new ShareGetAccessPolicyOptions()
+     *         .setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId)))) {
+     *     System.out.printf("Access policy %s allows these permissions: %s",
+     *         result.getId(), result.getAccessPolicy().getPermissions());
+     * }
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -667,7 +818,18 @@ public PagedIterable getAccessPolicy(ShareGetAccessPolicy * *

Set a read only stored access policy

* - * {@codesnippet ShareClient.setAccessPolicy#List} + * + *
+     * ShareAccessPolicy accessPolicy = new ShareAccessPolicy().setPermissions("r")
+     *     .setStartsOn(OffsetDateTime.now(ZoneOffset.UTC))
+     *     .setExpiresOn(OffsetDateTime.now(ZoneOffset.UTC).plusDays(10));
+     *
+     * ShareSignedIdentifier permission = new ShareSignedIdentifier().setId("mypolicy").setAccessPolicy(accessPolicy);
+     *
+     * shareClient.setAccessPolicy(Collections.singletonList(permission));
+     * System.out.println("Setting access policies completed.");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -689,7 +851,19 @@ public ShareInfo setAccessPolicy(List permissions) { * *

Set a read only stored access policy

* - * {@codesnippet com.azure.storage.file.share.ShareClient.setAccessPolicyWithResponse#list-duration-context} + * + *
+     * ShareAccessPolicy accessPolicy = new ShareAccessPolicy().setPermissions("r")
+     *     .setStartsOn(OffsetDateTime.now(ZoneOffset.UTC))
+     *     .setExpiresOn(OffsetDateTime.now(ZoneOffset.UTC).plusDays(10));
+     *
+     * ShareSignedIdentifier permission = new ShareSignedIdentifier().setId("mypolicy").setAccessPolicy(accessPolicy);
+     *
+     * Response<ShareInfo> response = shareClient.setAccessPolicyWithResponse(Collections.singletonList(permission),
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Setting access policies completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -718,7 +892,21 @@ public Response setAccessPolicyWithResponse(ListSet a read only stored access policy

* - * {@codesnippet com.azure.storage.file.share.ShareClient.setAccessPolicyWithResponse#ShareSetAccessPolicyOptions-Duration-Context} + * + *
+     * ShareAccessPolicy accessPolicy = new ShareAccessPolicy().setPermissions("r")
+     *     .setStartsOn(OffsetDateTime.now(ZoneOffset.UTC))
+     *     .setExpiresOn(OffsetDateTime.now(ZoneOffset.UTC).plusDays(10));
+     *
+     * ShareSignedIdentifier permission = new ShareSignedIdentifier().setId("mypolicy").setAccessPolicy(accessPolicy);
+     *
+     * Response<ShareInfo> response = shareClient.setAccessPolicyWithResponse(
+     *     new ShareSetAccessPolicyOptions().setPermissions(Collections.singletonList(permission))
+     *         .setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId)),
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Setting access policies completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -747,7 +935,12 @@ public Response setAccessPolicyWithResponse(ShareSetAccessPolicyOptio * *

Retrieve the storage statistics

* - * {@codesnippet com.azure.storage.file.share.ShareClient.getStatistics} + * + *
+     * ShareStatistics response = shareClient.getStatistics();
+     * System.out.printf("The share is using %d GB", response.getShareUsageInGB());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -766,7 +959,13 @@ public ShareStatistics getStatistics() { * *

Retrieve the storage statistics

* - * {@codesnippet com.azure.storage.file.share.ShareClient.getStatisticsWithResponse#duration-context} + * + *
+     * Response<ShareStatistics> response = shareClient.getStatisticsWithResponse(
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("The share is using %d GB", response.getValue().getShareUsageInGB());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -789,7 +988,14 @@ public Response getStatisticsWithResponse(Duration timeout, Con * *

Retrieve the storage statistics

* - * {@codesnippet com.azure.storage.file.share.ShareClient.getStatisticsWithResponse#ShareGetStatisticsOptions-Duration-Context} + * + *
+     * Response<ShareStatistics> response = shareClient.getStatisticsWithResponse(
+     *     new ShareGetStatisticsOptions().setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId)),
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("The share is using %d GB", response.getValue().getShareUsageInGB());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -815,7 +1021,12 @@ public Response getStatisticsWithResponse(ShareGetStatisticsOpt * *

Create the directory "mydirectory"

* - * {@codesnippet com.azure.storage.file.share.ShareClient.createDirectory#string} + * + *
+     * ShareDirectoryClient response = shareClient.createDirectory("mydirectory");
+     * System.out.println("Complete creating the directory.");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -838,7 +1049,16 @@ public ShareDirectoryClient createDirectory(String directoryName) { * *

Create the directory "documents" with metadata "directory:metadata"

* - * {@codesnippet com.azure.storage.file.share.ShareClient.createDirectoryWithResponse#String-FileSmbProperties-String-Map-Duration-Context} + * + *
+     * FileSmbProperties smbProperties = new FileSmbProperties();
+     * String filePermission = "filePermission";
+     * Response<ShareDirectoryClient> response = shareClient.createDirectoryWithResponse("documents",
+     *     smbProperties, filePermission, Collections.singletonMap("directory", "metadata"),
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Creating the directory completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -873,7 +1093,12 @@ public Response createDirectoryWithResponse(String directo * *

Create the file "myfile" with size of 1024 bytes.

* - * {@codesnippet com.azure.storage.file.share.ShareClient.createFile#string-long} + * + *
+     * ShareFileClient response = shareClient.createFile("myfile", 1024);
+     * System.out.println("Complete creating the file with snapshot Id:" + response.getShareSnapshotId());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -904,7 +1129,27 @@ public ShareFileClient createFile(String fileName, long maxSize) { * *

Create the file "myfile" with a length of 1024 bytes, some headers, file SMB properties and metadata

* - * {@codesnippet com.azure.storage.file.share.ShareClient.createFileWithResponse#String-long-ShareFileHttpHeaders-FileSmbProperties-String-Map-Duration-Context} + * + *
+     * ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders()
+     *     .setContentType("text/html")
+     *     .setContentEncoding("gzip")
+     *     .setContentLanguage("en")
+     *     .setCacheControl("no-transform")
+     *     .setContentDisposition("attachment");
+     * FileSmbProperties smbProperties = new FileSmbProperties()
+     *     .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY))
+     *     .setFileCreationTime(OffsetDateTime.now())
+     *     .setFileLastWriteTime(OffsetDateTime.now())
+     *     .setFilePermissionKey("filePermissionKey");
+     * String filePermission = "filePermission";
+     * // NOTE: filePermission and filePermissionKey should never be both set
+     * Response<ShareFileClient> response = shareClient.createFileWithResponse("myfile", 1024,
+     *     httpHeaders, smbProperties, filePermission, Collections.singletonMap("directory", "metadata"),
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Creating the file completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -946,7 +1191,30 @@ public Response createFileWithResponse(String fileName, long ma * *

Create the file "myfile" with a length of 1024 bytes, some headers, file SMB properties and metadata

* - * {@codesnippet com.azure.storage.file.share.ShareClient.createFileWithResponse#String-long-ShareFileHttpHeaders-FileSmbProperties-String-Map-ShareRequestConditions-Duration-Context} + * + *
+     * ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders()
+     *     .setContentType("text/html")
+     *     .setContentEncoding("gzip")
+     *     .setContentLanguage("en")
+     *     .setCacheControl("no-transform")
+     *     .setContentDisposition("attachment");
+     * FileSmbProperties smbProperties = new FileSmbProperties()
+     *     .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY))
+     *     .setFileCreationTime(OffsetDateTime.now())
+     *     .setFileLastWriteTime(OffsetDateTime.now())
+     *     .setFilePermissionKey("filePermissionKey");
+     * String filePermission = "filePermission";
+     * // NOTE: filePermission and filePermissionKey should never be both set
+     *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     *
+     * Response<ShareFileClient> response = shareClient.createFileWithResponse("myfile", 1024,
+     *     httpHeaders, smbProperties, filePermission, Collections.singletonMap("directory", "metadata"),
+     *     requestConditions, Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Creating the file completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -990,7 +1258,12 @@ public Response createFileWithResponse(String fileName, long ma * *

Delete the directory "mydirectory"

* - * {@codesnippet com.azure.storage.file.share.ShareClient.deleteDirectory#string} + * + *
+     * shareClient.deleteDirectory("mydirectory");
+     * System.out.println("Completed deleting the directory.");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1011,7 +1284,13 @@ public void deleteDirectory(String directoryName) { * *

Delete the directory "mydirectory"

* - * {@codesnippet com.azure.storage.file.share.ShareClient.deleteDirectoryWithResponse#string-duration-context} + * + *
+     * Response<Void> response = shareClient.deleteDirectoryWithResponse("mydirectory",
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.println("Complete deleting the directory with status code: " + response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1037,7 +1316,12 @@ public Response deleteDirectoryWithResponse(String directoryName, Duration * *

Delete the file "myfile"

* - * {@codesnippet com.azure.storage.file.share.ShareClient.deleteFile#string} + * + *
+     * shareClient.deleteFile("myfile");
+     * System.out.println("Complete deleting the file.");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1057,7 +1341,13 @@ public void deleteFile(String fileName) { * *

Delete the file "myfile"

* - * {@codesnippet com.azure.storage.file.share.ShareClient.deleteFileWithResponse#string-duration-context} + * + *
+     * Response<Void> response = shareClient.deleteFileWithResponse("myfile",
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.println("Complete deleting the file with status code: " + response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1082,7 +1372,14 @@ public Response deleteFileWithResponse(String fileName, Duration timeout, * *

Delete the file "myfile"

* - * {@codesnippet com.azure.storage.file.share.ShareClient.deleteFileWithResponse#string-ShareRequestConditions-duration-context} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * Response<Void> response = shareClient.deleteFileWithResponse("myfile", requestConditions,
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.println("Complete deleting the file with status code: " + response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1109,7 +1406,12 @@ public Response deleteFileWithResponse(String fileName, ShareRequestCondit * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareClient.createPermission#string} + * + *
+     * String response = shareClient.createPermission("filePermission");
+     * System.out.printf("The file permission key is %s", response);
+     * 
+ * * * @param filePermission The file permission to get/create. * @return The file permission key associated with the file permission. @@ -1125,7 +1427,12 @@ public String createPermission(String filePermission) { * *
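
A short sketch of how the returned key is typically consumed (illustrative names, assuming the call above succeeded): the key can be applied to new directories or files through {@link FileSmbProperties#setFilePermissionKey(String)} instead of passing a raw permission string.

+     * // Illustrative sketch: reuse the created permission key through FileSmbProperties.
+     * String filePermissionKey = shareClient.createPermission("filePermission");
+     * FileSmbProperties keyedProperties = new FileSmbProperties().setFilePermissionKey(filePermissionKey);
+     * shareClient.createDirectoryWithResponse("keyeddirectory", keyedProperties, null, null,
+     *     Duration.ofSeconds(1), Context.NONE);
+     *
+ * *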

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareClient.createPermissionWithResponse#string-context} + * + *
+     * Response<String> response = shareClient.createPermissionWithResponse("filePermission", Context.NONE);
+     * System.out.printf("The file permission key is %s", response.getValue());
+     * 
+ * * * @param filePermission The file permission to get/create. * @param context Additional context that is passed through the Http pipeline during the service call. @@ -1141,7 +1448,12 @@ public Response createPermissionWithResponse(String filePermission, Cont * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareClient.getPermission#string} + * + *
+     * String response = shareClient.getPermission("filePermissionKey");
+     * System.out.printf("The file permission is %s", response);
+     * 
+ * * * @param filePermissionKey The file permission key. * @return The file permission associated with the file permission key. @@ -1156,7 +1468,12 @@ public String getPermission(String filePermissionKey) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareClient.getPermissionWithResponse#string-context} + * + *
+     * Response<String> response = shareClient.getPermissionWithResponse("filePermissionKey", Context.NONE);
+     * System.out.printf("The file permission is %s", response.getValue());
+     * 
+ * * * @param filePermissionKey The file permission key. * @param context Additional context that is passed through the Http pipeline during the service call. @@ -1174,7 +1491,18 @@ public Response getPermissionWithResponse(String filePermissionKey, Cont * *

Get the share snapshot id.

* - * {@codesnippet com.azure.storage.file.share.ShareClient.getSnapshotId} + * + *
+     * OffsetDateTime currentTime = OffsetDateTime.of(LocalDateTime.now(), ZoneOffset.UTC);
+     * ShareClient shareClient = new ShareClientBuilder().endpoint("https://${accountName}.file.core.windows.net")
+     *     .sasToken("${SASToken}")
+     *     .shareName("myshare")
+     *     .snapshot(currentTime.toString())
+     *     .buildClient();
+     *
+     * System.out.printf("Snapshot ID: %s%n", shareClient.getSnapshotId());
+     * 
+ * * * @return The snapshot id which is a unique {@code DateTime} value that identifies the share snapshot to its base * share. @@ -1188,7 +1516,12 @@ public String getSnapshotId() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareClient.getShareName} + * + *
+     * String shareName = shareClient.getShareName();
+     * System.out.println("The name of the share is " + shareName);
+     * 
+ * * * @return The name of the share. */ @@ -1221,7 +1554,17 @@ public HttpPipeline getHttpPipeline() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.generateSas#ShareServiceSasSignatureValues} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * ShareSasPermission permission = new ShareSasPermission().setReadPermission(true);
+     *
+     * ShareServiceSasSignatureValues values = new ShareServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * shareClient.generateSas(values); // Client must be authenticated via StorageSharedKeyCredential
+     * 
+ * * * @param shareServiceSasSignatureValues {@link ShareServiceSasSignatureValues} * @@ -1238,7 +1581,18 @@ public String generateSas(ShareServiceSasSignatureValues shareServiceSasSignatur * *
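
A usage sketch (illustrative only, reusing {@code values} from the sample above and assuming {@code shareClient} holds a {@link StorageSharedKeyCredential}): the generated SAS can back a client that authenticates with the SAS alone.

+     * // Illustrative sketch: build a SAS-only client from the generated token.
+     * String sas = shareClient.generateSas(values);
+     * ShareClient sasScopedClient = new ShareClientBuilder()
+     *     .endpoint(shareClient.getShareUrl())
+     *     .sasToken(sas)
+     *     .buildClient();
+     *
+ * *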

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.generateSas#ShareServiceSasSignatureValues-Context} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * ShareSasPermission permission = new ShareSasPermission().setReadPermission(true);
+     *
+     * ShareServiceSasSignatureValues values = new ShareServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * shareClient.generateSas(values, new Context("key", "value"));
+     * 
+ * * * @param shareServiceSasSignatureValues {@link ShareServiceSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. diff --git a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareClientBuilder.java b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareClientBuilder.java index e8ba70ae9fd75..5364e0c08105d 100644 --- a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareClientBuilder.java +++ b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareClientBuilder.java @@ -47,17 +47,47 @@ * client.

* *

Instantiating a synchronous Share Client with SAS token

- * {@codesnippet com.azure.storage.file.share.ShareClient.instantiation.sastoken} + * + *
+ * ShareClient shareClient = new ShareClientBuilder()
+ *     .endpoint("https://${accountName}.file.core.windows.net?${SASToken}")
+ *     .shareName("myshare")
+ *     .buildClient();
+ * 
+ * * *

Instantiating an Asynchronous Share Client with SAS token

- * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.instantiation.sastoken} + * + *
+ * ShareAsyncClient shareAsyncClient = new ShareClientBuilder()
+ *     .endpoint("https://{accountName}.file.core.windows.net?{SASToken}")
+ *     .shareName("myshare")
+ *     .buildAsyncClient();
+ * 
+ * * *

If the {@code endpoint} doesn't contain the query parameters to construct a SAS token it may be set using * {@link #sasToken(String) sasToken}.

* - * {@codesnippet com.azure.storage.file.share.ShareClient.instantiation.credential} + * + *
+ * ShareClient shareClient = new ShareClientBuilder()
+ *     .endpoint("https://${accountName}.file.core.windows.net")
+ *     .sasToken("${SASTokenQueryParams}")
+ *     .shareName("myshare")
+ *     .buildClient();
+ * 
+ * * - * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.instantiation.credential} + * + *
+ * ShareAsyncClient shareAsyncClient = new ShareClientBuilder()
+ *     .endpoint("https://{accountName}.file.core.windows.net")
+ *     .sasToken("${SASTokenQueryParams}")
+ *     .shareName("myshare")
+ *     .buildAsyncClient();
+ * 
+ * * *

Another way to authenticate the client is using a {@link StorageSharedKeyCredential}. To create a * StorageSharedKeyCredential a connection string from the Storage File service must be used. Set the @@ -66,10 +96,26 @@ * preferred when authorizing requests sent to the service.

* *

Instantiating a synchronous Share Client with connection string.

- * {@codesnippet com.azure.storage.file.share.ShareClient.instantiation.connectionstring} + * + *
+ * String connectionString = "DefaultEndpointsProtocol=https;AccountName={name};AccountKey={key};"
+ *     + "EndpointSuffix={core.windows.net}";
+ * ShareClient shareClient = new ShareClientBuilder()
+ *     .connectionString(connectionString).shareName("myshare")
+ *     .buildClient();
+ * 
+ * * *
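
A minimal sketch of the shared key alternative described above (illustrative only, reusing {@code connectionString} from the sample above): the credential is parsed from the connection string and passed explicitly.

+ * // Illustrative sketch: parse a StorageSharedKeyCredential and pass it instead of the raw connection string.
+ * StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(connectionString);
+ * ShareClient sharedKeyClient = new ShareClientBuilder()
+ *     .endpoint("https://{accountName}.file.core.windows.net")
+ *     .credential(credential)
+ *     .shareName("myshare")
+ *     .buildClient();
+ *
+ * *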

Instantiating an Asynchronous Share Client with connection string.

- * {@codesnippet com.azure.storage.file.share.ShareAsyncClient.instantiation.connectionstring} + * + *
+ * String connectionString = "DefaultEndpointsProtocol=https;AccountName={name};AccountKey={key};"
+ *     + "EndpointSuffix={core.windows.net}";
+ * ShareAsyncClient shareAsyncClient = new ShareClientBuilder()
+ *     .connectionString(connectionString).shareName("myshare")
+ *     .buildAsyncClient();
+ * 
+ * * * @see ShareClient * @see ShareAsyncClient diff --git a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareDirectoryAsyncClient.java b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareDirectoryAsyncClient.java index c7b6ca1f33d43..b65c7dc7915e6 100644 --- a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareDirectoryAsyncClient.java +++ b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareDirectoryAsyncClient.java @@ -72,7 +72,14 @@ * *

Instantiating an Asynchronous Directory Client

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.instantiation} + * + *
+ * ShareDirectoryAsyncClient client = new ShareFileClientBuilder()
+ *     .connectionString("${connectionString}")
+ *     .endpoint("${endpoint}")
+ *     .buildDirectoryAsyncClient();
+ * 
+ * * *

View {@link ShareFileClientBuilder this} for additional ways to construct the client.

* @@ -180,7 +187,11 @@ public ShareDirectoryAsyncClient getSubdirectoryClient(String subdirectoryName) * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.exists} + * + *
+     * client.exists().subscribe(response -> System.out.printf("Exists? %b%n", response));
+     * 
+ * * * @return Flag indicating existence of the directory. */ @@ -194,7 +205,11 @@ public Mono exists() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.existsWithResponse} + * + *
+     * client.existsWithResponse().subscribe(response -> System.out.printf("Exists? %b%n", response.getValue()));
+     * 
+ * * * @return Flag indicating existence of the directory. */ @@ -233,7 +248,16 @@ private boolean checkDoesNotExistStatusCode(Throwable t) { * *

Create the directory

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.create} + * + *
+     * shareDirectoryAsyncClient.create().subscribe(
+     *     response -> {
+     *     },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Completed creating the directory!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -258,7 +282,18 @@ public Mono create() { * *

Create the directory

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.createWithResponse#FileSmbProperties-String-Map} + * + *
+     * FileSmbProperties smbProperties = new FileSmbProperties();
+     * String filePermission = "filePermission";
+     * Map<String, String> metadata = Collections.singletonMap("directory", "metadata");
+     * shareDirectoryAsyncClient.createWithResponse(smbProperties, filePermission, metadata).subscribe(
+     *     response ->
+     *         System.out.println("Completed creating the directory with status code:" + response.getStatusCode()),
+     *     error -> System.err.print(error.toString())
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -310,7 +345,16 @@ Mono> createWithResponse(FileSmbProperties smbPrope * *

Delete the directory

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.delete} + * + *
+     * shareDirectoryAsyncClient.delete().subscribe(
+     *     response -> {
+     *     },
+     *     error -> System.err.println(error.toString()),
+     *     () -> System.out.println("Completed deleting the directory.")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -334,7 +378,14 @@ public Mono delete() { * *

Delete the directory

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.deleteWithResponse} + * + *
+     * shareDirectoryAsyncClient.deleteWithResponse().subscribe(
+     *     response -> System.out.printf("Delete completed with status code %d", response.getStatusCode()),
+     *     error -> System.err.println(error.toString())
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -366,7 +417,13 @@ Mono> deleteWithResponse(Context context) { * *

Retrieve directory properties

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.getProperties} + * + *
+     * shareDirectoryAsyncClient.getProperties().subscribe(properties -> {
+     *     System.out.printf("Directory latest modified date is %s.", properties.getLastModified());
+     * });
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -390,7 +447,13 @@ public Mono getProperties() { * *

Retrieve directory properties

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.getPropertiesWithResponse} + * + *
+     * shareDirectoryAsyncClient.getPropertiesWithResponse().subscribe(properties -> {
+     *     System.out.printf("Directory latest modified date is %s:", properties.getValue().getLastModified());
+     * });
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -421,7 +484,15 @@ Mono> getPropertiesWithResponse(Context conte * *

Set directory properties

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.setProperties#FileSmbProperties-String} + * + *
+     * FileSmbProperties smbProperties = new FileSmbProperties();
+     * String filePermission = "filePermission";
+     * shareDirectoryAsyncClient.setProperties(smbProperties, filePermission).subscribe(properties -> {
+     *     System.out.printf("Directory latest modified date is %s:", properties.getLastModified());
+     * });
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -446,7 +517,15 @@ public Mono setProperties(FileSmbProperties smbProperties, S * *

Set directory properties

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.setPropertiesWithResponse#FileSmbProperties-String} + * + *
+     * FileSmbProperties smbProperties = new FileSmbProperties();
+     * String filePermission = "filePermission";
+     * shareDirectoryAsyncClient.setPropertiesWithResponse(smbProperties, filePermission).subscribe(properties -> {
+     *     System.out.printf("Directory latest modified date is %s:", properties.getValue().getLastModified());
+     * });
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -498,11 +577,21 @@ Mono> setPropertiesWithResponse(FileSmbProperties s * *

Set the metadata to "directory:updatedMetadata"

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.setMetadata#map} + * + *
+     * shareDirectoryAsyncClient.setMetadata(Collections.singletonMap("directory", "updatedMetadata"))
+     *     .subscribe(response -> System.out.println("Setting the directory metadata completed."));
+     * 
+ * * *

Clear the metadata of the directory

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.setMetadata#map.clearMetadata} + * + *
+     * shareDirectoryAsyncClient.setMetadata(null)
+     *     .doOnSuccess(response -> System.out.println("Clearing the directory metadata completed"));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -530,11 +619,23 @@ public Mono setMetadata(Map metad * *

Set the metadata to "directory:updatedMetadata"

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.setMetadataWithResponse#map} + * + *
+     * shareDirectoryAsyncClient.setMetadataWithResponse(Collections.singletonMap("directory", "updatedMetadata"))
+     *     .subscribe(response -> System.out.println("Setting the directory metadata completed with status code:"
+     *         + response.getStatusCode()));
+     * 
+ * * *

Clear the metadata of the directory

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.setMetadataWithResponse#map.clearMetadata} + * + *
+     * shareDirectoryAsyncClient.setMetadataWithResponse(null).subscribe(
+     *     response -> System.out.printf("Clearing the directory metadata completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -569,7 +670,16 @@ Mono> setMetadataWithResponse(MapList all sub-directories and files in the account

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.listFilesAndDirectories} + * + *
+     * shareDirectoryAsyncClient.listFilesAndDirectories().subscribe(
+     *     fileRef -> System.out.printf("Is the resource a directory? %b. The resource name is: %s.",
+     *         fileRef.isDirectory(), fileRef.getName()),
+     *     error -> System.err.println(error.toString()),
+     *     () -> System.out.println("Completed listing the directories and files.")
+     * );
+     * 
+ * * *

For more information, see the * Azure @@ -593,7 +703,16 @@ public PagedFlux listFilesAndDirectories() { * *

List all sub-directories with "subdir" prefix and return 10 results in the account

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.listFilesAndDirectories#string-integer} + * + *
+     * shareDirectoryAsyncClient.listFilesAndDirectories("subdir", 10).subscribe(
+     *     fileRef -> System.out.printf("Is the resource a directory? %b. The resource name is: %s.",
+     *         fileRef.isDirectory(), fileRef.getName()),
+     *     error -> System.err.println(error.toString()),
+     *     () -> System.out.println("Completed listing the directories and files.")
+     * );
+     * 
+ * * *

For more information, see the * Azure @@ -623,7 +742,16 @@ public PagedFlux listFilesAndDirectories(String prefix, Integer m * *

List all sub-directories with "subdir" prefix and return 10 results in the account

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.listFilesAndDirectories#ShareListFilesAndDirectoriesOptions} + * + *
+     * shareDirectoryAsyncClient.listFilesAndDirectories(new ShareListFilesAndDirectoriesOptions()
+     *     .setPrefix("subdir").setMaxResultsPerPage(10))
+     *     .subscribe(fileRef -> System.out.printf("Is the resource a directory? %b. The resource name is: %s.",
+     *         fileRef.isDirectory(), fileRef.getName()),
+     *         error -> System.err.println(error.toString()),
+     *         () -> System.out.println("Completed listing the directories and files."));
+     * 
+ * * *

For more information, see the * Azure @@ -686,7 +814,13 @@ PagedFlux listFilesAndDirectoriesWithOptionalTimeout( * *

Get 10 handles recursively.

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.listHandles#integer-boolean} + * + *
+     * shareDirectoryAsyncClient.listHandles(10, true)
+     *     .subscribe(handleItem -> System.out.printf("Get handles completed with handle id %s",
+     *         handleItem.getHandleId()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -728,7 +862,14 @@ PagedFlux listHandlesWithOptionalTimeout(Integer maxResultPerPage, b * *

Force close handles returned by list handles.

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.forceCloseHandle#String} + * + *
+     * shareDirectoryAsyncClient.listHandles(null, true).subscribe(handleItem ->
+     *     shareDirectoryAsyncClient.forceCloseHandle(handleItem.getHandleId()).subscribe(ignored ->
+     *         System.out.printf("Closed handle %s on resource %s%n",
+     *             handleItem.getHandleId(), handleItem.getPath())));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -753,7 +894,14 @@ public Mono forceCloseHandle(String handleId) { * *

Force close handles returned by list handles.

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.forceCloseHandleWithResponse#String} + * + *
+     * shareDirectoryAsyncClient.listHandles(null, true).subscribe(handleItem ->
+     *     shareDirectoryAsyncClient.forceCloseHandleWithResponse(handleItem.getHandleId()).subscribe(response ->
+     *         System.out.printf("Closing handle %s on resource %s completed with status code %d%n",
+     *             handleItem.getHandleId(), handleItem.getPath(), response.getStatusCode())));
+     * 
+ * * *
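
The same close-handles pass can also be composed without nested {@code subscribe} calls; a brief illustrative sketch using {@code flatMap}:

+     * // Illustrative sketch: compose listHandles with forceCloseHandleWithResponse via flatMap.
+     * shareDirectoryAsyncClient.listHandles(null, true)
+     *     .flatMap(handleItem -> shareDirectoryAsyncClient.forceCloseHandleWithResponse(handleItem.getHandleId()))
+     *     .subscribe(response -> System.out.printf("Close returned status code %d%n", response.getStatusCode()));
+     *
+ * *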

For more information, see the * Azure Docs.

@@ -786,7 +934,13 @@ Mono> forceCloseHandleWithResponse(String handleId, C * *

Force close all handles recursively.

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.forceCloseAllHandles#boolean} + * + *
+     * shareDirectoryAsyncClient.forceCloseAllHandles(true).subscribe(closeHandlesInfo ->
+     *     System.out.printf("Closed %d open handles on the directory%nFailed to close %d open handles on the "
+     *         + "directory%n", closeHandlesInfo.getClosedHandles(), closeHandlesInfo.getFailedHandles()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -832,7 +986,12 @@ PagedFlux forceCloseAllHandlesWithTimeout(boolean recursive, D * *

Create the subdirectory "subdir"

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.createSubdirectory#string} + * + *
+     * shareDirectoryAsyncClient.createSubdirectory("subdir")
+     *     .doOnSuccess(response -> System.out.println("Completed creating the subdirectory."));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -860,7 +1019,19 @@ public Mono createSubdirectory(String subdirectoryNam * *

Create the subdirectory named "subdir", with metadata

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.createSubdirectoryWithResponse#String-FileSmbProperties-String-Map} + * + *
+     * FileSmbProperties smbProperties = new FileSmbProperties();
+     * String filePermission = "filePermission";
+     * Map<String, String> metadata = Collections.singletonMap("directory", "metadata");
+     * shareDirectoryAsyncClient.createSubdirectoryWithResponse("subdir", smbProperties, filePermission, metadata).subscribe(
+     *     response ->
+     *         System.out.println("Successfully creating the subdirectory with status code: "
+     *             + response.getStatusCode()),
+     *     error -> System.err.println(error.toString())
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -899,7 +1070,16 @@ Mono> createSubdirectoryWithResponse(String * *

Delete the subdirectory named "mysubdirectory"

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.deleteSubdirectory#string} + * + *
+     * shareDirectoryAsyncClient.deleteSubdirectory("mysubdirectory").subscribe(
+     *     response -> {
+     *     },
+     *     error -> System.err.println(error.toString()),
+     *     () -> System.out.println("Completed deleting the subdirectory.")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -925,7 +1105,16 @@ public Mono deleteSubdirectory(String subdirectoryName) { * *

Delete the subdirectory named "mysubdirectory"

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.deleteSubdirectoryWithResponse#string} + * + *
+     * shareDirectoryAsyncClient.deleteSubdirectoryWithResponse("mysubdirectory").subscribe(
+     *     response -> System.out.printf("Delete subdirectory completed with status code %d",
+     *         response.getStatusCode()),
+     *     error -> System.err.println(error.toString()),
+     *     () -> System.out.println("Completed deleting the subdirectory.")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -958,7 +1147,16 @@ Mono> deleteSubdirectoryWithResponse(String subdirectoryName, Con * *

Create a 1 KB file named "myfile"

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.createFile#string-long} + * + *
+     * shareDirectoryAsyncClient.createFile("myfile", 1024).subscribe(
+     *     response -> {
+     *     },
+     *     error -> System.err.println(error.toString()),
+     *     () -> System.out.println("Completed creating the file.")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -987,7 +1185,29 @@ public Mono createFile(String fileName, long maxSize) { * *

Create the file named "myFile"

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.createFileWithResponse#String-long-ShareFileHttpHeaders-FileSmbProperties-String-Map} + * + *
+     * ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders()
+     *     .setContentType("text/html")
+     *     .setContentEncoding("gzip")
+     *     .setContentLanguage("en")
+     *     .setCacheControl("no-transform")
+     *     .setContentDisposition("attachment");
+     * FileSmbProperties smbProperties = new FileSmbProperties()
+     *     .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY))
+     *     .setFileCreationTime(OffsetDateTime.now())
+     *     .setFileLastWriteTime(OffsetDateTime.now())
+     *     .setFilePermissionKey("filePermissionKey");
+     * String filePermission = "filePermission";
+     * // NOTE: filePermission and filePermissionKey should never be both set
+     * shareDirectoryAsyncClient.createFileWithResponse("myFile", 1024, httpHeaders, smbProperties, filePermission,
+     *     Collections.singletonMap("directory", "metadata")).subscribe(
+     *         response -> System.out.printf("Creating the file completed with status code %d", response.getStatusCode()),
+     *         error -> System.err.println(error.toString()),
+     *         () -> System.out.println("Completed creating the file.")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1018,7 +1238,32 @@ public Mono> createFileWithResponse(String fileNa * *

Create the file named "myFile"

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.createFileWithResponse#String-long-ShareFileHttpHeaders-FileSmbProperties-String-Map-ShareRequestConditions} + * + *
+     * ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders()
+     *     .setContentType("text/html")
+     *     .setContentEncoding("gzip")
+     *     .setContentLanguage("en")
+     *     .setCacheControl("no-transform")
+     *     .setContentDisposition("attachment");
+     * FileSmbProperties smbProperties = new FileSmbProperties()
+     *     .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY))
+     *     .setFileCreationTime(OffsetDateTime.now())
+     *     .setFileLastWriteTime(OffsetDateTime.now())
+     *     .setFilePermissionKey("filePermissionKey");
+     * String filePermission = "filePermission";
+     * // NOTE: filePermission and filePermissionKey should never be both set
+     *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     *
+     * shareDirectoryAsyncClient.createFileWithResponse("myFile", 1024, httpHeaders, smbProperties, filePermission,
+     *     Collections.singletonMap("directory", "metadata"), requestConditions).subscribe(
+     *         response -> System.out.printf("Creating the file completed with status code %d", response.getStatusCode()),
+     *         error -> System.err.println(error.toString()),
+     *         () -> System.out.println("Completed creating the file.")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1063,7 +1308,16 @@ Mono> createFileWithResponse(String fileName, lon * *

Delete the file "myfile"

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.deleteFile#string} + * + *
+     * shareDirectoryAsyncClient.deleteFile("myfile").subscribe(
+     *     response -> {
+     *     },
+     *     error -> System.err.println(error.toString()),
+     *     () -> System.out.println("Completed deleting the file.")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1089,7 +1343,15 @@ public Mono deleteFile(String fileName) { * *

Delete the file "myfile"

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.deleteFileWithResponse#string} + * + *
+     * shareDirectoryAsyncClient.deleteFileWithResponse("myfile").subscribe(
+     *     response -> System.out.printf("Delete file completed with status code %d", response.getStatusCode()),
+     *     error -> System.err.println(error.toString()),
+     *     () -> System.out.println("Completed deleting the file.")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1111,7 +1373,16 @@ public Mono> deleteFileWithResponse(String fileName) { * *

Delete the file "myfile"

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.deleteFileWithResponse#string-ShareRequestConditions} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * shareDirectoryAsyncClient.deleteFileWithResponse("myfile", requestConditions).subscribe(
+     *     response -> System.out.printf("Delete file completed with status code %d", response.getStatusCode()),
+     *     error -> System.err.println(error.toString()),
+     *     () -> System.out.println("Completed deleting the file.")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1146,7 +1417,20 @@ Mono> deleteFileWithResponse(String fileName, ShareRequestConditi * *

Get the share snapshot id.

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.getShareSnapshotId} + * + *
+     * OffsetDateTime currentTime = OffsetDateTime.of(LocalDateTime.now(), ZoneOffset.UTC);
+     * ShareDirectoryAsyncClient shareDirectoryAsyncClient = new ShareFileClientBuilder()
+     *     .endpoint("https://${accountName}.file.core.windows.net")
+     *     .sasToken("${SASToken}")
+     *     .shareName("myshare")
+     *     .resourcePath("mydirectory")
+     *     .snapshot(currentTime.toString())
+     *     .buildDirectoryAsyncClient();
+     *
+     * System.out.printf("Snapshot ID: %s%n", shareDirectoryAsyncClient.getShareSnapshotId());
+     * 
+ * * * @return The snapshot id which is a unique {@code DateTime} value that identifies the share snapshot to its base * share. @@ -1160,7 +1444,12 @@ public String getShareSnapshotId() { * *

Get the share name.

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.getShareName} + * + *
+     * String shareName = shareDirectoryAsyncClient.getShareName();
+     * System.out.println("The share name of the directory is " + shareName);
+     * 
+ * * * @return The share name of the directory. */ @@ -1173,7 +1462,12 @@ public String getShareName() { * *

Get directory path.

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.getDirectoryPath} + * + *
+     * String directoryPath = shareDirectoryAsyncClient.getDirectoryPath();
+     * System.out.println("The name of the directory is " + directoryPath);
+     * 
+ * * * @return The path of the directory. */ @@ -1207,7 +1501,17 @@ public HttpPipeline getHttpPipeline() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.generateSas#ShareServiceSasSignatureValues} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * ShareFileSasPermission permission = new ShareFileSasPermission().setReadPermission(true);
+     *
+     * ShareServiceSasSignatureValues values = new ShareServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * shareDirectoryAsyncClient.generateSas(values); // Client must be authenticated via StorageSharedKeyCredential
+     * 
+ * * * @param shareServiceSasSignatureValues {@link ShareServiceSasSignatureValues} * @@ -1224,7 +1528,18 @@ public String generateSas(ShareServiceSasSignatureValues shareServiceSasSignatur * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.generateSas#ShareServiceSasSignatureValues-Context} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * ShareFileSasPermission permission = new ShareFileSasPermission().setReadPermission(true);
+     *
+     * ShareServiceSasSignatureValues values = new ShareServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * shareDirectoryAsyncClient.generateSas(values, new Context("key", "value"));
+     * 
+ * * * @param shareServiceSasSignatureValues {@link ShareServiceSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. diff --git a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareDirectoryClient.java b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareDirectoryClient.java index 9f03fc3842849..a5b90a8c6c810 100644 --- a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareDirectoryClient.java +++ b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareDirectoryClient.java @@ -39,7 +39,14 @@ * *

Instantiating a Synchronous Directory Client

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.instantiation} + * + *
+ * ShareDirectoryClient client = new ShareFileClientBuilder()
+ *     .connectionString("${connectionString}")
+ *     .endpoint("${endpoint}")
+ *     .buildDirectoryClient();
+ * 
+ * * *

View {@link ShareFileClientBuilder this} for additional ways to construct the client.

* @@ -110,7 +117,11 @@ public ShareDirectoryClient getSubdirectoryClient(String subdirectoryName) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.exists} + * + *
+     * System.out.printf("Exists? %b%n", client.exists());
+     * 
+ * * * @return Flag indicating existence of the directory. */ @@ -124,7 +135,12 @@ public Boolean exists() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.existsWithResponse#Duration-Context} + * + *
+     * Context context = new Context("Key", "Value");
+     * System.out.printf("Exists? %b%n", client.existsWithResponse(timeout, context).getValue());
+     * 
+ * * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. @@ -144,7 +160,12 @@ public Response existsWithResponse(Duration timeout, Context context) { * *

Create the directory

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.createDirectory} + * + *
+     * shareDirectoryClient.create();
+     * System.out.println("Completed creating the directory. ");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -165,7 +186,15 @@ public ShareDirectoryInfo create() { * *

Create the directory

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.createWithResponse#FileSmbProperties-String-Map-Duration-Context} + * + *
+     * FileSmbProperties smbProperties = new FileSmbProperties();
+     * String filePermission = "filePermission";
+     * Response<ShareDirectoryInfo> response = shareDirectoryClient.createWithResponse(smbProperties, filePermission,
+     *     Collections.singletonMap("directory", "metadata"), Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.println("Completed creating the directory with status code: " + response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -196,7 +225,12 @@ public Response createWithResponse(FileSmbProperties smbProp * *

Delete the directory

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.delete} + * + *
+     * shareDirectoryClient.delete();
+     * System.out.println("Completed deleting the file.");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -215,7 +249,12 @@ public void delete() { * *

Delete the directory

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.deleteWithResponse#duration-context} + * + *
+     * Response<Void> response = shareDirectoryClient.deleteWithResponse(Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.println("Completed deleting the file with status code: " + response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -241,7 +280,12 @@ public Response deleteWithResponse(Duration timeout, Context context) { * *

Retrieve directory properties

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.getProperties} + * + *
+     * ShareDirectoryProperties response = shareDirectoryClient.getProperties();
+     * System.out.printf("Directory latest modified date is %s.", response.getLastModified());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -261,7 +305,13 @@ public ShareDirectoryProperties getProperties() { * *

Retrieve directory properties

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.getPropertiesWithResponse#duration-Context} + * + *
+     * Response<ShareDirectoryProperties> response = shareDirectoryClient.getPropertiesWithResponse(
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Directory latest modified date is %s.", response.getValue().getLastModified());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -286,7 +336,14 @@ public Response getPropertiesWithResponse(Duration tim * *

Set directory properties

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.setProperties#FileSmbProperties-String} + * + *
+     * FileSmbProperties smbProperties = new FileSmbProperties();
+     * String filePermission = "filePermission";
+     * ShareDirectoryInfo response = shareDirectoryClient.setProperties(smbProperties, filePermission);
+     * System.out.printf("Directory latest modified date is %s.", response.getLastModified());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -307,7 +364,15 @@ public ShareDirectoryInfo setProperties(FileSmbProperties smbProperties, String * *

Set directory properties

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.setPropertiesWithResponse#FileSmbProperties-String-Duration-Context} + * + *
+     * FileSmbProperties smbProperties = new FileSmbProperties();
+     * String filePermission = "filePermission";
+     * Response<ShareDirectoryInfo> response = shareDirectoryClient.setPropertiesWithResponse(smbProperties, filePermission,
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Directory latest modified date is %s.", response.getValue().getLastModified());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -337,11 +402,22 @@ public Response setPropertiesWithResponse(FileSmbProperties * *

Set the metadata to "directory:updatedMetadata"

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.setMetadata#map} + * + *
+     * ShareDirectorySetMetadataInfo response =
+     *     shareDirectoryClient.setMetadata(Collections.singletonMap("directory", "updatedMetadata"));
+     * System.out.printf("Setting the directory metadata completed with updated etag %s", response.getETag());
+     * 
+ * * *

Clear the metadata of the directory

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.setMetadata#map.clearMetadata} + * + *
+     * ShareDirectorySetMetadataInfo response = shareDirectoryClient.setMetadata(null);
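+     * // Passing null clears any metadata currently associated with the directory.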
+     * System.out.printf("Cleared metadata.");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -365,11 +441,24 @@ public ShareDirectorySetMetadataInfo setMetadata(Map metadata) { * *

Set the metadata to "directory:updatedMetadata"

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.setMetadataWithResponse#map-duration-context} + * + *
+     * Response<ShareDirectorySetMetadataInfo> response =
+     *     shareDirectoryClient.setMetadataWithResponse(Collections.singletonMap("directory", "updatedMetadata"),
+     *         Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Setting the directory metadata completed with updated etag %d", response.getStatusCode());
+     * 
+ * * *

Clear the metadata of the directory

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.setMetadataWithResponse#map-duration-context.clearMetadata} + * + *
+     * Response<ShareDirectorySetMetadataInfo> response = shareDirectoryClient.setMetadataWithResponse(null,
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Directory latest modified date is %s.", response.getStatusCode());
+     * 
+ * *

For more information, see the * Azure Docs.

* @@ -397,7 +486,14 @@ public Response setMetadataWithResponse(MapList all sub-directories and files in the account

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.listFilesAndDirectories} + * + *
+     * shareDirectoryClient.listFilesAndDirectories().forEach(
+     *     fileRef -> System.out.printf("Is the resource a directory? %b. The resource name is: %s.",
+     *         fileRef.isDirectory(), fileRef.getName())
+     * );
+     * 
+ * * *

For more information, see the * Azure @@ -418,7 +514,15 @@ public PagedIterable listFilesAndDirectories() { *

List all sub-directories and files in this directory with "subdir" prefix and return 10 results in the * account

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.listFilesAndDirectories#string-integer-duration-context} + * + *
+     * shareDirectoryClient.listFilesAndDirectories("subdir", 10, Duration.ofSeconds(1),
+     *     new Context(key1, value1)).forEach(
+     *         fileRef -> System.out.printf("Is the resource a directory? %b. The resource name is: %s.",
+     *             fileRef.isDirectory(), fileRef.getName())
+     * );
+     * 
+ * * *

For more information, see the * Azure @@ -451,7 +555,14 @@ public PagedIterable listFilesAndDirectories(String prefix, Integ *

List all sub-directories and files in this directory with "subdir" prefix and return 10 results in the * account

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.listFilesAndDirectories#ShareListFilesAndDirectoriesOptions-duration-context} + * + *
+     * shareDirectoryClient.listFilesAndDirectories(new ShareListFilesAndDirectoriesOptions()
+     *         .setPrefix("subdir").setMaxResultsPerPage(10), Duration.ofSeconds(1), new Context(key1, value1))
+     *     .forEach(fileRef -> System.out.printf("Is the resource a directory? %b. The resource name is: %s.",
+     *         fileRef.isDirectory(), fileRef.getName()));
+     * 
+ * * *

For more information, see the * Azure @@ -478,7 +589,13 @@ public PagedIterable listFilesAndDirectories(ShareListFilesAndDir * *

Get 10 handles with recursive call.

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.listHandles#Integer-boolean-duration-context} + * + *
+     * Iterable<HandleItem> result = shareDirectoryClient.listHandles(10, true, Duration.ofSeconds(1),
+     *     new Context(key1, value1));
+     * System.out.printf("Get handles completed with handle id %s", result.iterator().next().getHandleId());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -507,7 +624,14 @@ public PagedIterable listHandles(Integer maxResultsPerPage, boolean * *

Force close handles returned by list handles.

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.forceCloseHandle#String} + * + *
+     * shareDirectoryClient.listHandles(null, true, Duration.ofSeconds(30), Context.NONE).forEach(handleItem -> {
+     *     shareDirectoryClient.forceCloseHandle(handleItem.getHandleId());
+     *     System.out.printf("Closed handle %s on resource %s%n", handleItem.getHandleId(), handleItem.getPath());
+     * });
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -528,7 +652,16 @@ public CloseHandlesInfo forceCloseHandle(String handleId) { * *

Force close handles returned by list handles.

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.forceCloseHandleWithResponse#String-Duration-Context} + * + *
+     * shareDirectoryClient.listHandles(null, true, Duration.ofSeconds(30), Context.NONE).forEach(handleItem -> {
+     *     Response<CloseHandlesInfo> closeResponse = shareDirectoryClient.forceCloseHandleWithResponse(
+     *         handleItem.getHandleId(), Duration.ofSeconds(30), Context.NONE);
+     *     System.out.printf("Closing handle %s on resource %s completed with status code %d%n",
+     *         handleItem.getHandleId(), handleItem.getPath(), closeResponse.getStatusCode());
+     * });
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -553,7 +686,14 @@ public Response forceCloseHandleWithResponse(String handleId, * *

Force close all handles recursively.

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.forceCloseAllHandles#boolean-Duration-Context} + * + *
+     * CloseHandlesInfo closeHandlesInfo = shareDirectoryClient.forceCloseAllHandles(true, Duration.ofSeconds(30),
+     *     Context.NONE);
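+     * // Passing true closes handles on this directory and, recursively, on every file and subdirectory beneath it.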
+     * System.out.printf("Closed %d open handles on the directory%n", closeHandlesInfo.getClosedHandles());
+     * System.out.printf("Failed to close %d open handles on the directory%n", closeHandlesInfo.getFailedHandles());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -581,7 +721,12 @@ public CloseHandlesInfo forceCloseAllHandles(boolean recursive, Duration timeout * *

Create the sub directory "subdir"

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.createSubdirectory#string} + * + *
+     * shareDirectoryClient.createSubdirectory("subdir");
+     * System.out.println("Completed creating the subdirectory.");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -605,7 +750,16 @@ public ShareDirectoryClient createSubdirectory(String subdirectoryName) { * *

Create the subdirectory named "subdir", with metadata

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.createSubdirectoryWithResponse#String-FileSmbProperties-String-Map-Duration-Context} + * + *
+     * FileSmbProperties smbProperties = new FileSmbProperties();
+     * String filePermission = "filePermission";
+     * Response<ShareDirectoryClient> response = shareDirectoryClient.createSubdirectoryWithResponse("subdir",
+     *     smbProperties, filePermission, Collections.singletonMap("directory", "metadata"),
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Creating the sub directory completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -639,7 +793,12 @@ public Response createSubdirectoryWithResponse(String subd * *

Delete the subdirectory named "subdir"

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.deleteSubdirectory#string} + * + *
+     * shareDirectoryClient.deleteSubdirectory("mysubdirectory");
+     * System.out.println("Complete deleting the subdirectory.");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -661,7 +820,13 @@ public void deleteSubdirectory(String subdirectoryName) { * *

Delete the subdirectory named "subdir"

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.deleteSubdirectoryWithResponse#string-duration-context} + * + *
+     * Response<Void> response = shareDirectoryClient.deleteSubdirectoryWithResponse("mysubdirectory",
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.println("Completed deleting the subdirectory with status code: " + response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -690,7 +855,12 @@ public Response deleteSubdirectoryWithResponse(String subdirectoryName, Du * *

Create a 1k file named "myFile"

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.createFile#string-long} + * + *
+     * ShareFileClient response = shareDirectoryClient.createFile("myfile", 1024);
+     * System.out.println("Completed creating the file: " + response);
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -715,7 +885,27 @@ public ShareFileClient createFile(String fileName, long maxSize) { * *

Create the file named "myFile"

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.createFile#String-long-ShareFileHttpHeaders-FileSmbProperties-String-Map-duration-context} + * + *
+     * ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders()
+     *     .setContentType("text/html")
+     *     .setContentEncoding("gzip")
+     *     .setContentLanguage("en")
+     *     .setCacheControl("no-transform")
+     *     .setContentDisposition("attachment");
+     * FileSmbProperties smbProperties = new FileSmbProperties()
+     *     .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY))
+     *     .setFileCreationTime(OffsetDateTime.now())
+     *     .setFileLastWriteTime(OffsetDateTime.now())
+     *     .setFilePermissionKey("filePermissionKey");
+     * String filePermission = "filePermission";
+     * // NOTE: filePermission and filePermissionKey should never be both set
+     * Response<ShareFileClient> response = shareDirectoryClient.createFileWithResponse("myFile", 1024,
+     *     httpHeaders, smbProperties, filePermission, Collections.singletonMap("directory", "metadata"),
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.println("Completed creating the file with status code: " + response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -750,7 +940,30 @@ public Response createFileWithResponse(String fileName, long ma * *

Create the file named "myFile"

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.createFile#String-long-ShareFileHttpHeaders-FileSmbProperties-String-Map-ShareRequestConditions-duration-context} + * + *
+     * ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders()
+     *     .setContentType("text/html")
+     *     .setContentEncoding("gzip")
+     *     .setContentLanguage("en")
+     *     .setCacheControl("no-transform")
+     *     .setContentDisposition("attachment");
+     * FileSmbProperties smbProperties = new FileSmbProperties()
+     *     .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY))
+     *     .setFileCreationTime(OffsetDateTime.now())
+     *     .setFileLastWriteTime(OffsetDateTime.now())
+     *     .setFilePermissionKey("filePermissionKey");
+     * String filePermission = "filePermission";
+     * // NOTE: filePermission and filePermissionKey should never be both set
+     *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     *
+     * Response<ShareFileClient> response = shareDirectoryClient.createFileWithResponse("myFile", 1024,
+     *     httpHeaders, smbProperties, filePermission, Collections.singletonMap("directory", "metadata"),
+     *     requestConditions, Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.println("Completed creating the file with status code: " + response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -787,7 +1000,12 @@ public Response createFileWithResponse(String fileName, long ma * *

Delete the file "filetest"

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.deleteFile#string} + * + *
+     * shareDirectoryClient.deleteFile("myfile");
+     * System.out.println("Completed deleting the file.");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -808,7 +1026,13 @@ public void deleteFile(String fileName) { * *

Delete the file "filetest"

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.deleteFileWithResponse#string-duration-context} + * + *
+     * Response<Void> response = shareDirectoryClient.deleteFileWithResponse("myfile",
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.println("Completed deleting the file with status code: " + response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -834,7 +1058,14 @@ public Response deleteFileWithResponse(String fileName, Duration timeout, * *

Delete the file "filetest"

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.deleteFileWithResponse#string-ShareRequestConditions-duration-context} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * Response<Void> response = shareDirectoryClient.deleteFileWithResponse("myfile", requestConditions,
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.println("Completed deleting the file with status code: " + response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -864,7 +1095,20 @@ public Response deleteFileWithResponse(String fileName, ShareRequestCondit * *

Get the share snapshot id.

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.getShareSnapshotId} + * + *
+     * OffsetDateTime currentTime = OffsetDateTime.of(LocalDateTime.now(), ZoneOffset.UTC);
+     * ShareDirectoryClient shareDirectoryClient = new ShareFileClientBuilder()
+     *     .endpoint("https://${accountName}.file.core.windows.net")
+     *     .sasToken("${SASToken}")
+     *     .shareName("myshare")
+     *     .resourcePath("mydirectory")
+     *     .snapshot(currentTime.toString())
+     *     .buildDirectoryClient();
+     *
+     * System.out.printf("Snapshot ID: %s%n", shareDirectoryClient.getShareSnapshotId());
+     * 
+ * * * @return The snapshot id which is a unique {@code DateTime} value that identifies the share snapshot to its base * share. @@ -878,7 +1122,12 @@ public String getShareSnapshotId() { * *

Get the share name.

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.getShareName} + * + *
+     * String shareName = shareDirectoryClient.getShareName();
+     * System.out.println("The share name of the directory is " + shareName);
+     * 
+ * * * @return The share name of the directory. */ @@ -891,7 +1140,12 @@ public String getShareName() { * *

Get directory path.

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.getDirectoryPath} + * + *
+     * String directoryPath = shareDirectoryClient.getDirectoryPath();
+     * System.out.println("The name of the directory is " + directoryPath);
+     * 
+ * * * @return The path of the directory. */ @@ -924,7 +1178,17 @@ public HttpPipeline getHttpPipeline() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.generateSas#ShareServiceSasSignatureValues} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * ShareFileSasPermission permission = new ShareFileSasPermission().setReadPermission(true);
+     *
+     * ShareServiceSasSignatureValues values = new ShareServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * shareDirectoryClient.generateSas(values); // Client must be authenticated via StorageSharedKeyCredential
+     * 
+ * * * @param shareServiceSasSignatureValues {@link ShareServiceSasSignatureValues} * @@ -941,7 +1205,18 @@ public String generateSas(ShareServiceSasSignatureValues shareServiceSasSignatur * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.generateSas#ShareServiceSasSignatureValues-Context} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * ShareFileSasPermission permission = new ShareFileSasPermission().setReadPermission(true);
+     *
+     * ShareServiceSasSignatureValues values = new ShareServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * shareDirectoryClient.generateSas(values, new Context("key", "value"));
+     * 
+ * * * @param shareServiceSasSignatureValues {@link ShareServiceSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. diff --git a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareFileAsyncClient.java b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareFileAsyncClient.java index a8703443278f9..38e287121230b 100644 --- a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareFileAsyncClient.java +++ b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareFileAsyncClient.java @@ -126,7 +126,14 @@ * *

Instantiating an Asynchronous File Client

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.instantiation} + * + *
+ * ShareFileAsyncClient client = new ShareFileClientBuilder()
+ *     .connectionString("${connectionString}")
+ *     .endpoint("${endpoint}")
+ *     .buildFileAsyncClient();
+ * 
+ * * *

View {@link ShareFileClientBuilder this} for additional ways to construct the client.

* @@ -207,7 +214,11 @@ public ShareServiceVersion getServiceVersion() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.exists} + * + *
+     * client.exists().subscribe(response -> System.out.printf("Exists? %b%n", response));
+     * 
+ * * * @return Flag indicating existence of the file. */ @@ -221,7 +232,11 @@ public Mono exists() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.existsWithResponse} + * + *
+     * client.existsWithResponse().subscribe(response -> System.out.printf("Exists? %b%n", response.getValue()));
+     * 
+ * * * @return Flag indicating existence of the file. */ @@ -271,7 +286,15 @@ private boolean checkDoesNotExistStatusCode(Throwable t) { * *

Create the file with size 1KB.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.create} + * + *
+     * shareFileAsyncClient.create(1024).subscribe(
+     *     response -> { },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete creating the file!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -298,7 +321,27 @@ public Mono create(long maxSize) { * *

Create the file with length of 1024 bytes, some headers, file smb properties and metadata.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.createWithResponse#long-ShareFileHttpHeaders-FileSmbProperties-String-Map} + * + *
+     * ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders()
+     *     .setContentType("text/html")
+     *     .setContentEncoding("gzip")
+     *     .setContentLanguage("en")
+     *     .setCacheControl("no-transform")
+     *     .setContentDisposition("attachment");
+     * FileSmbProperties smbProperties = new FileSmbProperties()
+     *     .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY))
+     *     .setFileCreationTime(OffsetDateTime.now())
+     *     .setFileLastWriteTime(OffsetDateTime.now())
+     *     .setFilePermissionKey("filePermissionKey");
+     * String filePermission = "filePermission";
+     * // NOTE: filePermission and filePermissionKey should never be both set
+     * shareFileAsyncClient.createWithResponse(1024, httpHeaders, smbProperties, filePermission,
+     *     Collections.singletonMap("directory", "metadata"))
+     *     .subscribe(response -> System.out.printf("Creating the file completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -329,7 +372,30 @@ public Mono> createWithResponse(long maxSize, ShareFileH * *

Create the file with length of 1024 bytes, some headers, file smb properties and metadata.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.createWithResponse#long-ShareFileHttpHeaders-FileSmbProperties-String-Map-ShareRequestConditions} + * + *
+     * ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders()
+     *     .setContentType("text/html")
+     *     .setContentEncoding("gzip")
+     *     .setContentLanguage("en")
+     *     .setCacheControl("no-transform")
+     *     .setContentDisposition("attachment");
+     * FileSmbProperties smbProperties = new FileSmbProperties()
+     *     .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY))
+     *     .setFileCreationTime(OffsetDateTime.now())
+     *     .setFileLastWriteTime(OffsetDateTime.now())
+     *     .setFilePermissionKey("filePermissionKey");
+     * String filePermission = "filePermission";
+     * // NOTE: filePermission and filePermissionKey should never be both set
+     *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     *
+     * shareFileAsyncClient.createWithResponse(1024, httpHeaders, smbProperties, filePermission,
+     *     Collections.singletonMap("directory", "metadata"), requestConditions)
+     *     .subscribe(response -> System.out.printf("Creating the file completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -388,7 +454,19 @@ Mono> createWithResponse(long maxSize, ShareFileHttpHead * *

Copy file from source url to the {@code resourcePath}

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.beginCopy#string-map-duration} + * + *
+     * PollerFlux<ShareFileCopyInfo, Void> poller = shareFileAsyncClient.beginCopy(
+     *     "https://{accountName}.file.core.windows.net?{SASToken}",
+     *     Collections.singletonMap("file", "metadata"), Duration.ofSeconds(2));
+     *
+     * poller.subscribe(response -> {
+     *     final ShareFileCopyInfo value = response.getValue();
+     *     System.out.printf("Copy source: %s. Status: %s.%n", value.getCopySourceUrl(), value.getCopyStatus());
+     * }, error -> System.err.println("Error: " + error),
+     *     () -> System.out.println("Complete copying the file."));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -413,7 +491,30 @@ public PollerFlux beginCopy(String sourceUrl, MapCopy file from source url to the {@code resourcePath}

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.beginCopy#string-filesmbproperties-string-permissioncopymodetype-boolean-boolean-map-duration-ShareRequestConditions} + * + *
+     * FileSmbProperties smbProperties = new FileSmbProperties()
+     *     .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY))
+     *     .setFileCreationTime(OffsetDateTime.now())
+     *     .setFileLastWriteTime(OffsetDateTime.now())
+     *     .setFilePermissionKey("filePermissionKey");
+     * String filePermission = "filePermission";
+     * // NOTE: filePermission and filePermissionKey should never be both set
+     * boolean ignoreReadOnly = false; // Default value
+     * boolean setArchiveAttribute = true; // Default value
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     *
+     * PollerFlux<ShareFileCopyInfo, Void> poller = shareFileAsyncClient.beginCopy(
+     *     "https://{accountName}.file.core.windows.net?{SASToken}",
+     *     smbProperties, filePermission, PermissionCopyModeType.SOURCE, ignoreReadOnly, setArchiveAttribute,
+     *     Collections.singletonMap("file", "metadata"), Duration.ofSeconds(2), requestConditions);
+     *
+     * poller.subscribe(response -> {
+     *     final ShareFileCopyInfo value = response.getValue();
+     *     System.out.printf("Copy source: %s. Status: %s.%n", value.getCopySourceUrl(), value.getCopyStatus());
+     * }, error -> System.err.println("Error: " + error), () -> System.out.println("Complete copying the file."));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -565,7 +666,12 @@ private Mono> onPoll(PollResponseAbort copy file from copy id("someCopyId")

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.abortCopy#string} + * + *
+     * shareFileAsyncClient.abortCopy("someCopyId")
+     *     .doOnSuccess(response -> System.out.println("Abort copying the file completed."));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -589,7 +695,13 @@ public Mono abortCopy(String copyId) { * *

Abort copy file from copy id("someCopyId")

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.abortCopyWithResponse#string} + * + *
+     * shareFileAsyncClient.abortCopyWithResponse("someCopyId")
+     *     .subscribe(response -> System.out.printf("Abort copying the file completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -609,7 +721,14 @@ public Mono> abortCopyWithResponse(String copyId) { * *

Abort copy file from copy id("someCopyId")

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.abortCopyWithResponse#string-ShareRequestConditions} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * shareFileAsyncClient.abortCopyWithResponse("someCopyId", requestConditions)
+     *     .subscribe(response -> System.out.printf("Abort copying the file completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -645,7 +764,19 @@ Mono> abortCopyWithResponse(String copyId, ShareRequestConditions * *

Download the file to the current folder.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.downloadToFile#string} + * + *
+     * shareFileAsyncClient.downloadToFile("somelocalfilepath").subscribe(
+     *     response -> {
+     *         if (Files.exists(Paths.get("somelocalfilepath"))) {
+     *             System.out.println("Successfully downloaded the file.");
+     *         }
+     *     },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete downloading the file!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -672,7 +803,21 @@ public Mono downloadToFile(String downloadFilePath) { * *

Download the file from 1024 to 2048 bytes to the current folder.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.downloadToFileWithResponse#string-ShareFileRange} + * + *
+     * shareFileAsyncClient.downloadToFileWithResponse("somelocalfilepath", new ShareFileRange(1024, 2047L))
+     *     .subscribe(
+     *         response -> {
+     *             if (Files.exists(Paths.get("somelocalfilepath"))) {
+     *                 System.out.println("Successfully downloaded the file with status code "
+     *                     + response.getStatusCode());
+     *             }
+     *         },
+     *         error -> System.err.print(error.toString()),
+     *         () -> System.out.println("Complete downloading the file!")
+     *     );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -697,7 +842,23 @@ public Mono> downloadToFileWithResponse(String dow * *

Download the file from 1024 to 2048 bytes to the current folder.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.downloadToFileWithResponse#string-ShareFileRange-ShareRequestConditions} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * shareFileAsyncClient.downloadToFileWithResponse("somelocalfilepath", new ShareFileRange(1024, 2047L),
+     *     requestConditions)
+     *     .subscribe(
+     *         response -> {
+     *             if (Files.exists(Paths.get("somelocalfilepath"))) {
+     *                 System.out.println("Successfully downloaded the file with status code "
+     *                     + response.getStatusCode());
+     *             }
+     *         },
+     *         error -> System.err.print(error.toString()),
+     *         () -> System.out.println("Complete downloading the file!")
+     *     );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -777,7 +938,15 @@ private void channelCleanUp(AsynchronousFileChannel channel) { * *

Download the file with its metadata and properties.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.download} + * + *
+     * shareFileAsyncClient.download().subscribe(
+     *     response -> { },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete downloading the data!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -799,7 +968,15 @@ public Flux download() { * *

Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.downloadWithResponse#ShareFileRange-Boolean} + * + *
+     * shareFileAsyncClient.downloadWithResponse(new ShareFileRange(1024, 2047L), false)
+     *     .subscribe(response ->
+     *             System.out.printf("Complete downloading the data with status code %d%n", response.getStatusCode()),
+     *         error -> System.err.println(error.getMessage())
+     *     );
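+     * // The ShareFileRange above is inclusive, so bytes 1024 through 2047 (1024 bytes in total) are downloaded.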
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -820,7 +997,16 @@ public Mono downloadWithResponse(ShareFileRange * *

Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.downloadWithResponse#ShareFileRange-Boolean-ShareRequestConditions} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * shareFileAsyncClient.downloadWithResponse(new ShareFileRange(1024, 2047L), false, requestConditions)
+     *     .subscribe(response ->
+     *             System.out.printf("Complete downloading the data with status code %d%n", response.getStatusCode()),
+     *         error -> System.err.println(error.getMessage())
+     *     );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -844,7 +1030,22 @@ public Mono downloadWithResponse(ShareFileRange * *

Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.downloadWithResponse#ShareFileDownloadOptions} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * ShareFileRange range = new ShareFileRange(1024, 2047L);
+     * DownloadRetryOptions retryOptions = new DownloadRetryOptions().setMaxRetryRequests(3);
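+     * // Up to 3 retries will be attempted if the connection is lost while the body is being downloaded.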
+     * ShareFileDownloadOptions options = new ShareFileDownloadOptions().setRange(range)
+     *     .setRequestConditions(requestConditions)
+     *     .setRangeContentMd5Requested(false)
+     *     .setRetryOptions(retryOptions);
+     * shareFileAsyncClient.downloadWithResponse(options)
+     *     .subscribe(response ->
+     *             System.out.printf("Complete downloading the data with status code %d%n", response.getStatusCode()),
+     *         error -> System.err.println(error.getMessage())
+     *     );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -946,7 +1147,15 @@ private Mono downloadRange(ShareFileRange range, Boolean rangeGe * *

Delete the file

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.delete} + * + *
+     * shareFileAsyncClient.delete().subscribe(
+     *     response -> { },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete deleting the file!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -970,7 +1179,14 @@ public Mono delete() { * *

Delete the file

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.deleteWithResponse} + * + *
+     * shareFileAsyncClient.deleteWithResponse().subscribe(
+     *     response -> System.out.println("Complete deleting the file with status code:" + response.getStatusCode()),
+     *     error -> System.err.print(error.toString())
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -990,7 +1206,15 @@ public Mono> deleteWithResponse() { * *

Delete the file

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.deleteWithResponse#ShareRequestConditions} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * shareFileAsyncClient.deleteWithResponse(requestConditions).subscribe(
+     *     response -> System.out.println("Complete deleting the file with status code:" + response.getStatusCode()),
+     *     error -> System.err.print(error.toString())
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1022,7 +1246,14 @@ Mono> deleteWithResponse(ShareRequestConditions requestConditions * *

Retrieve file properties

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.getProperties} + * + *
+     * shareFileAsyncClient.getProperties()
+     *     .subscribe(properties -> {
+     *         System.out.printf("File latest modified date is %s.", properties.getLastModified());
+     *     });
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1046,7 +1277,15 @@ public Mono getProperties() { * *

Retrieve file properties

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.getPropertiesWithResponse} + * + *
+     * shareFileAsyncClient.getPropertiesWithResponse()
+     *     .subscribe(response -> {
+     *         ShareFileProperties properties = response.getValue();
+     *         System.out.printf("File latest modified date is %s.", properties.getLastModified());
+     *     });
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1066,7 +1305,16 @@ public Mono> getPropertiesWithResponse() { * *

Retrieve file properties

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.getPropertiesWithResponse#ShareRequestConditions} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * shareFileAsyncClient.getPropertiesWithResponse(requestConditions)
+     *     .subscribe(response -> {
+     *         ShareFileProperties properties = response.getValue();
+     *         System.out.printf("File latest modified date is %s.", properties.getLastModified());
+     *     });
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1106,11 +1354,34 @@ Mono> getPropertiesWithResponse(ShareRequestCondit * *

Set the httpHeaders with a contentType of "text/html"

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.setProperties#long-ShareFileHttpHeaders-FileSmbProperties-String} + * + *
+     * ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders()
+     *     .setContentType("text/html")
+     *     .setContentEncoding("gzip")
+     *     .setContentLanguage("en")
+     *     .setCacheControl("no-transform")
+     *     .setContentDisposition("attachment");
+     * FileSmbProperties smbProperties = new FileSmbProperties()
+     *     .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY))
+     *     .setFileCreationTime(OffsetDateTime.now())
+     *     .setFileLastWriteTime(OffsetDateTime.now())
+     *     .setFilePermissionKey("filePermissionKey");
+     * String filePermission = "filePermission";
+     * // NOTE: filePermission and filePermissionKey should never be both set
+     * shareFileAsyncClient.setProperties(1024, httpHeaders, smbProperties, filePermission)
+     *     .doOnSuccess(response -> System.out.println("Setting the file properties completed."));
+     * 
+ * * *

Clear the httpHeaders of the file and preserve the SMB properties

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.setProperties#long-ShareFileHttpHeaders-FileSmbProperties-String.clearHttpHeaderspreserveSMBProperties} + * + *
+     * shareFileAsyncClient.setProperties(1024, null, null, null)
+     *     .subscribe(response -> System.out.println("Setting the file httpHeaders completed."));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1144,11 +1415,36 @@ public Mono setProperties(long newFileSize, ShareFileHttpHeaders * *

Set the httpHeaders with a contentType of "text/html"

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.setPropertiesWithResponse#long-ShareFileHttpHeaders-FileSmbProperties-String} + * + *
+     * ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders()
+     *     .setContentType("text/html")
+     *     .setContentEncoding("gzip")
+     *     .setContentLanguage("en")
+     *     .setCacheControl("no-transform")
+     *     .setContentDisposition("attachment");
+     * FileSmbProperties smbProperties = new FileSmbProperties()
+     *     .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY))
+     *     .setFileCreationTime(OffsetDateTime.now())
+     *     .setFileLastWriteTime(OffsetDateTime.now())
+     *     .setFilePermissionKey("filePermissionKey");
+     * String filePermission = "filePermission";
+     * // NOTE: filePermission and filePermissionKey should never be both set
+     * shareFileAsyncClient.setPropertiesWithResponse(1024, httpHeaders, smbProperties, filePermission)
+     *     .subscribe(response -> System.out.printf("Setting the file properties completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

Clear the httpHeaders of the file and preserve the SMB properties

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.setPropertiesWithResponse#long-ShareFileHttpHeaders-FileSmbProperties-String.clearHttpHeaderspreserveSMBProperties} + * + *
+     * shareFileAsyncClient.setPropertiesWithResponse(1024, null, null, null)
+     *     .subscribe(response -> System.out.printf("Setting the file httpHeaders completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1177,11 +1473,38 @@ public Mono> setPropertiesWithResponse(long newFileSize, * *

Set the httpHeaders with a contentType of "text/html"

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.setPropertiesWithResponse#long-ShareFileHttpHeaders-FileSmbProperties-String-ShareRequestConditions} + * + *
+     * ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders()
+     *     .setContentType("text/html")
+     *     .setContentEncoding("gzip")
+     *     .setContentLanguage("en")
+     *     .setCacheControl("no-transform")
+     *     .setContentDisposition("attachment");
+     * FileSmbProperties smbProperties = new FileSmbProperties()
+     *     .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY))
+     *     .setFileCreationTime(OffsetDateTime.now())
+     *     .setFileLastWriteTime(OffsetDateTime.now())
+     *     .setFilePermissionKey("filePermissionKey");
+     * String filePermission = "filePermission";
+     * // NOTE: filePermission and filePermissionKey should never be both set
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * shareFileAsyncClient.setPropertiesWithResponse(1024, httpHeaders, smbProperties, filePermission, requestConditions)
+     *     .subscribe(response -> System.out.printf("Setting the file properties completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

Clear the httpHeaders of the file and preserve the SMB properties

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.setPropertiesWithResponse#long-ShareFileHttpHeaders-FileSmbProperties-String-ShareRequestConditions.clearHttpHeaderspreserveSMBProperties} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * shareFileAsyncClient.setPropertiesWithResponse(1024, null, null, null, requestConditions)
+     *     .subscribe(response -> System.out.printf("Setting the file httpHeaders completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1240,11 +1563,22 @@ Mono> setPropertiesWithResponse(long newFileSize, ShareF * *

Set the metadata to "file:updatedMetadata"

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.setMetadata#map} + * + *
+     * shareFileAsyncClient.setMetadata(Collections.singletonMap("file", "updatedMetadata"))
+     *     .doOnSuccess(response -> System.out.println("Setting the file metadata completed."));
+     * 
+ * * *

Clear the metadata of the file

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.setMetadataWithResponse#map.clearMetadata} + * + *
+     * shareFileAsyncClient.setMetadataWithResponse(null).subscribe(
+     *     response -> System.out.printf("Setting the file metadata completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1271,11 +1605,23 @@ public Mono setMetadata(Map metadata) { * *

Set the metadata to "file:updatedMetadata"

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.setMetadataWithResponse#map} + * + *
+     * shareFileAsyncClient.setMetadataWithResponse(Collections.singletonMap("file", "updatedMetadata"))
+     *     .subscribe(response -> System.out.printf("Setting the file metadata completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

Clear the metadata of the file

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.setMetadataWithResponse#map.clearMetadata} + * + *
+     * shareFileAsyncClient.setMetadataWithResponse(null).subscribe(
+     *     response -> System.out.printf("Setting the file metadata completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1298,11 +1644,25 @@ public Mono> setMetadataWithResponse(MapSet the metadata to "file:updatedMetadata"

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.setMetadataWithResponse#map-ShareRequestConditions} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * shareFileAsyncClient.setMetadataWithResponse(Collections.singletonMap("file", "updatedMetadata"), requestConditions)
+     *     .subscribe(response -> System.out.printf("Setting the file metadata completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

Clear the metadata of the file

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.setMetadataWithResponse#map-ShareRequestConditions.clearMetadata} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * shareFileAsyncClient.setMetadataWithResponse(null, requestConditions).subscribe(
+     *     response -> System.out.printf("Setting the file metadata completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1345,7 +1705,16 @@ Mono> setMetadataWithResponse(MapUpload data "default" to the file in Storage File Service.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.upload#flux-long} + * + *
+     * ByteBuffer defaultData = ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8));
+     * shareFileAsyncClient.upload(Flux.just(defaultData), defaultData.remaining()).subscribe(
+     *     response -> { },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete deleting the file!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1376,7 +1745,16 @@ public Mono upload(Flux data, long length) { * *

Upload data "default" to the file starting at offset 0 in Storage File Service.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.uploadWithResponse#flux-long-long} + * + *
+     * ByteBuffer defaultData = ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8));
+     * shareFileAsyncClient.uploadWithResponse(Flux.just(defaultData), defaultData.remaining(), 0L).subscribe(
+     *     response -> { },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete deleting the file!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1409,7 +1787,18 @@ public Mono> uploadWithResponse(Flux d * *

Upload data "default" to the file starting at offset 0 in Storage File Service.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.uploadWithResponse#flux-long-long-ShareRequestConditions} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * ByteBuffer defaultData = ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8));
+     * shareFileAsyncClient.uploadWithResponse(Flux.just(defaultData), defaultData.remaining(), 0L, requestConditions)
+     *     .subscribe(
+     *         response -> { },
+     *         error -> System.err.print(error.toString()),
+     *         () -> System.out.println("Complete deleting the file!")
+     *     );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1449,7 +1838,15 @@ public Mono> uploadWithResponse(Flux d * *

Upload data "default" to the file in Storage File Service.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.upload#Flux-ParallelTransferOptions} + * + *
+     * ByteBuffer defaultData = ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8));
+     * shareFileAsyncClient.upload(Flux.just(defaultData), null).subscribe(
+     *         response -> { },
+     *         error -> System.err.print(error.toString()),
+     *         () -> System.out.println("Complete deleting the file!"));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1475,7 +1872,17 @@ public Mono upload(Flux data, ParallelTransferO * *

Upload data "default" to the file in Storage File Service.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.uploadWithResponse#ShareFileUploadOptions} + * + *
+     * ByteBuffer defaultData = ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8));
+     * shareFileAsyncClient.uploadWithResponse(new ShareFileUploadOptions(
+     *     Flux.just(defaultData))).subscribe(
+     *         response -> { },
+     *         error -> System.err.print(error.toString()),
+     *         () -> System.out.println("Complete deleting the file!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1589,7 +1996,16 @@ Mono> uploadInChunks(Flux data, long o * *

Upload data "default" to the file in Storage File Service.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.uploadRange#Flux-long} + * + *
+     * ByteBuffer defaultData = ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8));
+     * shareFileAsyncClient.uploadRange(Flux.just(defaultData), defaultData.remaining()).subscribe(
+     *         response -> { },
+     *         error -> System.err.print(error.toString()),
+     *         () -> System.out.println("Complete deleting the file!")
+     * );
+     * 
+ * * *

This method does a single Put Range operation. For more information, see the * Azure Docs.

@@ -1616,7 +2032,16 @@ public Mono uploadRange(Flux data, long length) * *

Upload data "default" to the file in Storage File Service.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.uploadRangeWithResponse#ShareFileUploadRangeOptions} + * + *
+     * ByteBuffer defaultData = ByteBuffer.wrap("default".getBytes(StandardCharsets.UTF_8));
+     * shareFileAsyncClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(
+     *     Flux.just(defaultData), defaultData.remaining())).subscribe(
+     *         response -> { },
+     *         error -> System.err.print(error.toString()),
+     *         () -> System.out.println("Complete deleting the file!"));
+     * 
+ * * *

This method does a single Put Range operation. For more information, see the * Azure Docs.

@@ -1664,7 +2089,15 @@ Mono> uploadRangeWithResponse(ShareFileUploadRange * *

Upload a number of bytes from a file at defined source and destination offsets

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.uploadRangeFromUrl#long-long-long-String} + * + *
+     * shareFileAsyncClient.uploadRangeFromUrl(6, 8, 0, "sourceUrl").subscribe(
+     *     response -> { },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Completed upload range from url!")
+     * );
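+     * // Above: copies 6 bytes, starting at offset 0 of the file at "sourceUrl", into this file starting at offset 8.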
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1694,7 +2127,15 @@ public Mono uploadRangeFromUrl(long length, lon * *

Upload a number of bytes from a file at defined source and destination offsets

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.uploadRangeFromUrlWithResponse#long-long-long-String} + * + *
+     * shareFileAsyncClient.uploadRangeFromUrlWithResponse(6, 8, 0, "sourceUrl").subscribe(
+     *     response -> { },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Completed upload range from url!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1720,7 +2161,16 @@ public Mono> uploadRangeFromUrlWithRes * *

Upload a number of bytes from a file at defined source and destination offsets

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.uploadRangeFromUrlWithResponse#long-long-long-String-ShareRequestConditions} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * shareFileAsyncClient.uploadRangeFromUrlWithResponse(6, 8, 0, "sourceUrl", requestConditions).subscribe(
+     *     response -> { },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Completed upload range from url!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1750,7 +2200,16 @@ public Mono> uploadRangeFromUrlWithRes * *

Upload a number of bytes from a file at defined source and destination offsets

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.uploadRangeFromUrlWithResponse#ShareFileUploadRangeFromUrlOptions} + * + *
+     * shareFileAsyncClient.uploadRangeFromUrlWithResponse(
+     *     new ShareFileUploadRangeFromUrlOptions(6, "sourceUrl").setDestinationOffset(8))
+     *     .subscribe(
+     *         response -> { },
+     *         error -> System.err.print(error.toString()),
+     *         () -> System.out.println("Completed upload range from url!"));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1800,7 +2259,15 @@ Mono> uploadRangeFromUrlWithResponse( * *

Clears the first 1024 bytes.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.clearRange#long} + * + *
+     * shareFileAsyncClient.clearRange(1024).subscribe(
+     *     response -> { },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete clearing the range!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1825,7 +2292,15 @@ public Mono clearRange(long length) { * *

Clear the range starting from 1024 with length of 1024.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.clearRange#long-long} + * + *
+     * shareFileAsyncClient.clearRangeWithResponse(1024, 1024).subscribe(
+     *     response -> { },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete clearing the range!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1849,7 +2324,16 @@ public Mono> clearRangeWithResponse(long length, l * *

Clear the range starting from 1024 with length of 1024.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.clearRange#long-long-ShareRequestConditions} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * shareFileAsyncClient.clearRangeWithResponse(1024, 1024, requestConditions).subscribe(
+     *     response -> { },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete clearing the range!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1891,7 +2375,15 @@ Mono> clearRangeWithResponse(long length, long off * *

Upload the file from the source file path.

* - * (@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.uploadFromFile#string} + * + *
+     * shareFileAsyncClient.uploadFromFile("someFilePath").subscribe(
+     *     response -> { },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete deleting the file!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs Create File @@ -1914,7 +2406,16 @@ public Mono uploadFromFile(String uploadFilePath) { * *

Upload the file from the source file path.

* - * (@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.uploadFromFile#string-ShareRequestConditions} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * shareFileAsyncClient.uploadFromFile("someFilePath", requestConditions).subscribe(
+     *     response -> { },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete deleting the file!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs Create File @@ -1964,7 +2465,12 @@ private List sliceFile(String path) { * *

List all ranges for the file client.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.listRanges} + * + *
+     * shareFileAsyncClient.listRanges().subscribe(range ->
+     *     System.out.printf("List ranges completed with start: %d, end: %d", range.getStart(), range.getEnd()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1987,7 +2493,13 @@ public PagedFlux listRanges() { * *

List all ranges within the file range from 1KB to 2KB.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.listRanges#ShareFileRange} + * + *
+     * shareFileAsyncClient.listRanges(new ShareFileRange(1024, 2048L))
+     *     .subscribe(result -> System.out.printf("List ranges completed with start: %d, end: %d",
+     *         result.getStart(), result.getEnd()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -2007,7 +2519,14 @@ public PagedFlux listRanges(ShareFileRange range) { * *

List all ranges within the file range from 1KB to 2KB.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.listRanges#ShareFileRange-ShareRequestConditions} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * shareFileAsyncClient.listRanges(new ShareFileRange(1024, 2048L), requestConditions)
+     *     .subscribe(result -> System.out.printf("List ranges completed with start: %d, end: %d",
+     *         result.getStart(), result.getEnd()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -2030,7 +2549,17 @@ public PagedFlux listRanges(ShareFileRange range, ShareRequestCo * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.listRangesDiff#String} + * + *
+     * final String prevSnapshot = "previoussnapshot";
+     * shareFileAsyncClient.listRangesDiff(prevSnapshot).subscribe(response -> {
+     *     System.out.println("Valid Share File Ranges are:");
+     *     for (FileRange range : response.getRanges()) {
+     *         System.out.printf("Start: %s, End: %s%n", range.getStart(), range.getEnd());
+     *     }
+     * });
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -2057,7 +2586,17 @@ public Mono listRangesDiff(String previousSnapshot) { * *

List all ranges within the file range from 1KB to 2KB.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.listRangesDiffWithResponse#ShareFileListRangesDiffOptions} + * + *
+     * shareFileAsyncClient.listRangesDiffWithResponse(new ShareFileListRangesDiffOptions("previoussnapshot")
+     *     .setRange(new ShareFileRange(1024, 2048L))).subscribe(response -> {
+     *         System.out.println("Valid Share File Ranges are:");
+     *         for (FileRange range : response.getValue().getRanges()) {
+     *             System.out.printf("Start: %s, End: %s%n", range.getStart(), range.getEnd());
+     *         }
+     *     });
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -2116,7 +2655,12 @@ Mono> listRangesWithResponse(ShareFileRange range, * *

List all handles for the file client.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.listHandles} + * + *
+     * shareFileAsyncClient.listHandles()
+     *     .subscribe(result -> System.out.printf("List handles completed with handle id %s", result.getHandleId()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -2139,7 +2683,12 @@ public PagedFlux listHandles() { * *

List 10 handles for the file client.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.listHandles#integer} + * + *
+     * shareFileAsyncClient.listHandles(10)
+     *     .subscribe(result -> System.out.printf("List handles completed with handle id %s", result.getHandleId()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -2178,7 +2727,14 @@ PagedFlux listHandlesWithOptionalTimeout(Integer maxResultsPerPage, * *

Force close handles returned by list handles.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.forceCloseHandle#String} + * + *
+     * shareFileAsyncClient.listHandles().subscribe(handleItem ->
+     *     shareFileAsyncClient.forceCloseHandle(handleItem.getHandleId()).subscribe(ignored ->
+     *         System.out.printf("Closed handle %s on resource %s%n",
+     *             handleItem.getHandleId(), handleItem.getPath())));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -2203,7 +2759,14 @@ public Mono forceCloseHandle(String handleId) { * *

Force close handles returned by list handles.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.forceCloseHandleWithResponse#String} + * + *
+     * shareFileAsyncClient.listHandles().subscribe(handleItem ->
+     *     shareFileAsyncClient.forceCloseHandleWithResponse(handleItem.getHandleId()).subscribe(response ->
+     *         System.out.printf("Closing handle %s on resource %s completed with status code %d%n",
+     *             handleItem.getHandleId(), handleItem.getPath(), response.getStatusCode())));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -2238,7 +2801,13 @@ Mono> forceCloseHandleWithResponse(String handleId, C * *

Force close all handles.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.forceCloseAllHandles} + * + *
+     * shareFileAsyncClient.forceCloseAllHandles().subscribe(handlesClosedInfo ->
+     *     System.out.printf("Closed %d open handles on the file.%nFailed to close %d open handles on the file%n",
+     *         handlesClosedInfo.getClosedHandles(), handlesClosedInfo.getFailedHandles()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -2281,7 +2850,20 @@ PagedFlux forceCloseAllHandlesWithOptionalTimeout(Duration tim * *

Get the share snapshot id.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.getShareSnapshotId} + * + *
+     * OffsetDateTime currentTime = OffsetDateTime.of(LocalDateTime.now(), ZoneOffset.UTC);
+     * ShareFileAsyncClient shareFileAsyncClient = new ShareFileClientBuilder()
+     *     .endpoint("https://${accountName}.file.core.windows.net")
+     *     .sasToken("${SASToken}")
+     *     .shareName("myshare")
+     *     .resourcePath("myfiile")
+     *     .snapshot(currentTime.toString())
+     *     .buildFileAsyncClient();
+     *
+     * System.out.printf("Snapshot ID: %s%n", shareFileAsyncClient.getShareSnapshotId());
+     * 
+ * * * @return The snapshot id which is a unique {@code DateTime} value that identifies the share snapshot to its base * share. @@ -2295,7 +2877,12 @@ public String getShareSnapshotId() { * *

Get the share name.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.getShareName} + * + *
+     * String shareName = shareFileAsyncClient.getShareName();
+     * System.out.println("The share name of the file is " + shareName);
+     * 
+ * * * @return The share name of the file. */ @@ -2308,7 +2895,12 @@ public String getShareName() { * *

Get the file path.

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.getFilePath} + * + *
+     * String filePath = shareFileAsyncClient.getFilePath();
+     * System.out.println("The name of the file is " + filePath);
+     * 
+ * * * @return The path of the file. */ @@ -2342,7 +2934,17 @@ public HttpPipeline getHttpPipeline() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.generateSas#ShareServiceSasSignatureValues} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * ShareFileSasPermission permission = new ShareFileSasPermission().setReadPermission(true);
+     *
+     * ShareServiceSasSignatureValues values = new ShareServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * shareFileAsyncClient.generateSas(values); // Client must be authenticated via StorageSharedKeyCredential
+     * 
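+     *
+     * A minimal follow-up sketch (an assumption, not part of the original sample): the returned SAS string could be
+     * used to authenticate a separate, SAS-based client. The endpoint, share name and path values are placeholders.
+     * 
+     * String sas = shareFileAsyncClient.generateSas(values);
+     * ShareFileAsyncClient sasClient = new ShareFileClientBuilder()
+     *     .endpoint("https://${accountName}.file.core.windows.net")
+     *     .sasToken(sas)
+     *     .shareName("myshare")
+     *     .resourcePath("myfile")
+     *     .buildFileAsyncClient();
+     * 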
+ * * * @param shareServiceSasSignatureValues {@link ShareServiceSasSignatureValues} * @@ -2359,7 +2961,18 @@ public String generateSas(ShareServiceSasSignatureValues shareServiceSasSignatur * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.generateSas#ShareServiceSasSignatureValues-Context} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * ShareFileSasPermission permission = new ShareFileSasPermission().setReadPermission(true);
+     *
+     * ShareServiceSasSignatureValues values = new ShareServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * shareFileAsyncClient.generateSas(values, new Context("key", "value"));
+     * 
+ * * * @param shareServiceSasSignatureValues {@link ShareServiceSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. diff --git a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareFileClient.java b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareFileClient.java index eee4394b01c57..87664907fa6bf 100644 --- a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareFileClient.java +++ b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareFileClient.java @@ -57,7 +57,14 @@ * *

Instantiating a synchronous File Client

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.instantiation} + * + *
+ * ShareFileClient client = new ShareFileClientBuilder()
+ *     .connectionString("${connectionString}")
+ *     .endpoint("${endpoint}")
+ *     .buildFileClient();
+ * 
+ * * *

View {@link ShareFileClientBuilder this} for additional ways to construct the client.

* @@ -158,7 +165,11 @@ public final StorageFileOutputStream getFileOutputStream(long offset) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.exists} + * + *
+     * System.out.printf("Exists? %b%n", client.exists());
+     * 
+ * * * @return Flag indicating existence of the file. */ @@ -172,7 +183,12 @@ public Boolean exists() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.existsWithResponse#Duration-Context} + * + *
+     * Context context = new Context("Key", "Value");
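+     * // 'timeout' is assumed to be a Duration defined elsewhere in the sample, e.g. Duration.ofSeconds(30)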
+     * System.out.printf("Exists? %b%n", client.existsWithResponse(timeout, context).getValue());
+     * 
+ * * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. @@ -192,7 +208,12 @@ public Response existsWithResponse(Duration timeout, Context context) { * *

Create the file with length of 1024 bytes, some headers and metadata.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.create} + * + *
+     * ShareFileInfo response = fileClient.create(1024);
+     * System.out.println("Complete creating the file.");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -214,7 +235,27 @@ public ShareFileInfo create(long maxSize) { * *

Create the file with length of 1024 bytes, some headers, file smb properties and metadata.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.createWithResponse#long-ShareFileHttpHeaders-FileSmbProperties-String-Map-Duration-Context} + * + *
+     * ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders()
+     *     .setContentType("text/html")
+     *     .setContentEncoding("gzip")
+     *     .setContentLanguage("en")
+     *     .setCacheControl("no-transform")
+     *     .setContentDisposition("attachment");
+     * FileSmbProperties smbProperties = new FileSmbProperties()
+     *     .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY))
+     *     .setFileCreationTime(OffsetDateTime.now())
+     *     .setFileLastWriteTime(OffsetDateTime.now())
+     *     .setFilePermissionKey("filePermissionKey");
+     * String filePermission = "filePermission";
+     * // NOTE: filePermission and filePermissionKey should never be both set
+     * Response<ShareFileInfo> response = fileClient.createWithResponse(1024, httpHeaders, smbProperties,
+     *     filePermission, Collections.singletonMap("directory", "metadata"), Duration.ofSeconds(1),
+     *     new Context(key1, value1));
+     * System.out.printf("Creating the file completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -248,7 +289,30 @@ public Response createWithResponse(long maxSize, ShareFileHttpHea * *

Create the file with length of 1024 bytes, some headers, file smb properties and metadata.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.createWithResponse#long-ShareFileHttpHeaders-FileSmbProperties-String-Map-ShareRequestConditions-Duration-Context} + * + *
+     * ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders()
+     *     .setContentType("text/html")
+     *     .setContentEncoding("gzip")
+     *     .setContentLanguage("en")
+     *     .setCacheControl("no-transform")
+     *     .setContentDisposition("attachment");
+     * FileSmbProperties smbProperties = new FileSmbProperties()
+     *     .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY))
+     *     .setFileCreationTime(OffsetDateTime.now())
+     *     .setFileLastWriteTime(OffsetDateTime.now())
+     *     .setFilePermissionKey("filePermissionKey");
+     * String filePermission = "filePermission";
+     * // NOTE: filePermission and filePermissionKey should never be both set
+     *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     *
+     * Response<ShareFileInfo> response = fileClient.createWithResponse(1024, httpHeaders, smbProperties,
+     *     filePermission, Collections.singletonMap("directory", "metadata"), requestConditions, Duration.ofSeconds(1),
+     *     new Context(key1, value1));
+     * System.out.printf("Creating the file completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -285,7 +349,18 @@ public Response createWithResponse(long maxSize, ShareFileHttpHea * *

Copy file from source getDirectoryUrl to the {@code resourcePath}

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.beginCopy#string-map-duration} + * + *
+     * SyncPoller<ShareFileCopyInfo, Void> poller = fileClient.beginCopy(
+     *     "https://{accountName}.file.core.windows.net?{SASToken}",
+     *     Collections.singletonMap("file", "metadata"), Duration.ofSeconds(2));
+     *
+     * final PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
+     * final ShareFileCopyInfo value = pollResponse.getValue();
+     * System.out.printf("Copy source: %s. Status: %s.%n", value.getCopySourceUrl(), value.getCopyStatus());
+     *
+     * 
+ * * *
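+     * A hedged follow-up sketch (an assumption, not part of the original sample): SyncPoller also exposes
+     * waitForCompletion(), which blocks until the copy reaches a terminal state.
+     * 
+     * final PollResponse<ShareFileCopyInfo> finalResponse = poller.waitForCompletion();
+     * System.out.printf("Final copy status: %s.%n", finalResponse.getValue().getCopyStatus());
+     * 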

For more information, see the * Azure Docs.

@@ -310,7 +385,29 @@ public SyncPoller beginCopy(String sourceUrl, MapCopy file from source getDirectoryUrl to the {@code resourcePath}

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.beginCopy#string-filesmbproperties-string-permissioncopymodetype-boolean-boolean-map-duration-ShareRequestConditions} + * + *
+     * FileSmbProperties smbProperties = new FileSmbProperties()
+     *     .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY))
+     *     .setFileCreationTime(OffsetDateTime.now())
+     *     .setFileLastWriteTime(OffsetDateTime.now())
+     *     .setFilePermissionKey("filePermissionKey");
+     * String filePermission = "filePermission";
+     * // NOTE: filePermission and filePermissionKey should never be both set
+     * boolean ignoreReadOnly = false; // Default value
+     * boolean setArchiveAttribute = true; // Default value
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     *
+     * SyncPoller<ShareFileCopyInfo, Void> poller = fileClient.beginCopy(
+     *     "https://{accountName}.file.core.windows.net?{SASToken}", smbProperties, filePermission,
+     *     PermissionCopyModeType.SOURCE, ignoreReadOnly, setArchiveAttribute,
+     *     Collections.singletonMap("file", "metadata"), Duration.ofSeconds(2), requestConditions);
+     *
+     * final PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
+     * final ShareFileCopyInfo value = pollResponse.getValue();
+     * System.out.printf("Copy source: %s. Status: %s.%n", value.getCopySourceUrl(), value.getCopyStatus());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -345,7 +442,12 @@ public SyncPoller beginCopy(String sourceUrl, FileSmbPr * *

Abort copy file from copy id("someCopyId")

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.abortCopy#string} + * + *
+     * fileClient.abortCopy("someCopyId");
+     * System.out.println("Abort copying the file completed.");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -364,7 +466,13 @@ public void abortCopy(String copyId) { * *

Abort copy file from copy id("someCopyId")

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.abortCopyWithResponse#string-duration-context} + * + *
+     * Response<Void> response = fileClient.abortCopyWithResponse("someCopyId", Duration.ofSeconds(1),
+     *     new Context(key1, value1));
+     * System.out.printf("Abort copying the file completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -388,7 +496,14 @@ public Response abortCopyWithResponse(String copyId, Duration timeout, Con * *

Abort copy file from copy id("someCopyId")

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.abortCopyWithResponse#string-ShareRequestConditions-duration-context} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * Response<Void> response = fileClient.abortCopyWithResponse("someCopyId", requestConditions,
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Abort copying the file completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -418,7 +533,14 @@ public Response abortCopyWithResponse(String copyId, ShareRequestCondition * *

Download the file to current folder.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.downloadToFile#string} + * + *
+     * fileClient.downloadToFile("somelocalfilepath");
+     * if (Files.exists(Paths.get("somelocalfilepath"))) {
+     *     System.out.println("Complete downloading the file.");
+     * }
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -441,7 +563,16 @@ public ShareFileProperties downloadToFile(String downloadFilePath) { * *

Download the file from 1024 to 2048 bytes to current folder.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.downloadToFileWithResponse#String-ShareFileRange-Duration-Context} + * + *
+     * Response<ShareFileProperties> response =
+     *     fileClient.downloadToFileWithResponse("somelocalfilepath", new ShareFileRange(1024, 2047L),
+     *         Duration.ofSeconds(1), Context.NONE);
+     * if (Files.exists(Paths.get("somelocalfilepath"))) {
+     *     System.out.println("Complete downloading the file with status code " + response.getStatusCode());
+     * }
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -469,7 +600,17 @@ public Response downloadToFileWithResponse(String downloadF * *

Download the file from 1024 to 2048 bytes to current folder.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.downloadToFileWithResponse#String-ShareFileRange-ShareRequestConditions-Duration-Context} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * Response<ShareFileProperties> response =
+     *     fileClient.downloadToFileWithResponse("somelocalfilepath", new ShareFileRange(1024, 2047L),
+     *         requestConditions, Duration.ofSeconds(1), Context.NONE);
+     * if (Files.exists(Paths.get("somelocalfilepath"))) {
+     *     System.out.println("Complete downloading the file with status code " + response.getStatusCode());
+     * }
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -497,7 +638,18 @@ public Response downloadToFileWithResponse(String downloadF * *

Download the file with its metadata and properties.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.download#OutputStream} + * + *
+     * try {
+     *     ByteArrayOutputStream stream = new ByteArrayOutputStream();
+     *     fileClient.download(stream);
+     *     System.out.printf("Completed downloading the file with content: %n%s%n",
+     *         new String(stream.toByteArray(), StandardCharsets.UTF_8));
+     * } catch (Throwable throwable) {
+     *     System.err.printf("Downloading failed with exception. Message: %s%n", throwable.getMessage());
+     * }
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -516,7 +668,21 @@ public void download(OutputStream stream) { * *

Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.downloadWithResponse#OutputStream-ShareFileRange-Boolean-Duration-Context} + * + *
+     * try {
+     *     ByteArrayOutputStream stream = new ByteArrayOutputStream();
+     *     Response<Void> response = fileClient.downloadWithResponse(stream, new ShareFileRange(1024, 2047L), false,
+     *         Duration.ofSeconds(30), new Context(key1, value1));
+     *
+     *     System.out.printf("Completed downloading file with status code %d%n", response.getStatusCode());
+     *     System.out.printf("Content of the file is: %n%s%n",
+     *         new String(stream.toByteArray(), StandardCharsets.UTF_8));
+     * } catch (Throwable throwable) {
+     *     System.err.printf("Downloading failed with exception. Message: %s%n", throwable.getMessage());
+     * }
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -544,7 +710,22 @@ public ShareFileDownloadResponse downloadWithResponse(OutputStream stream, Share * *

Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.downloadWithResponse#OutputStream-ShareFileRange-Boolean-ShareRequestConditions-Duration-Context} + * + *
+     * try {
+     *     ByteArrayOutputStream stream = new ByteArrayOutputStream();
+     *     ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     *     Response<Void> response = fileClient.downloadWithResponse(stream, new ShareFileRange(1024, 2047L), false,
+     *         requestConditions, Duration.ofSeconds(30), new Context(key1, value1));
+     *
+     *     System.out.printf("Completed downloading file with status code %d%n", response.getStatusCode());
+     *     System.out.printf("Content of the file is: %n%s%n",
+     *         new String(stream.toByteArray(), StandardCharsets.UTF_8));
+     * } catch (Throwable throwable) {
+     *     System.err.printf("Downloading failed with exception. Message: %s%n", throwable.getMessage());
+     * }
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -574,7 +755,28 @@ public ShareFileDownloadResponse downloadWithResponse(OutputStream stream, Share * *

Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.downloadWithResponse#OutputStream-ShareFileDownloadOptions-Duration-Context} + * + *
+     * try {
+     *     ByteArrayOutputStream stream = new ByteArrayOutputStream();
+     *     ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     *     ShareFileRange range = new ShareFileRange(1024, 2047L);
+     *     DownloadRetryOptions retryOptions = new DownloadRetryOptions().setMaxRetryRequests(3);
+     *     ShareFileDownloadOptions options = new ShareFileDownloadOptions().setRange(range)
+     *         .setRequestConditions(requestConditions)
+     *         .setRangeContentMd5Requested(false)
+     *         .setRetryOptions(retryOptions);
+     *     Response<Void> response = fileClient.downloadWithResponse(stream, options, Duration.ofSeconds(30),
+     *         new Context(key1, value1));
+     *
+     *     System.out.printf("Completed downloading file with status code %d%n", response.getStatusCode());
+     *     System.out.printf("Content of the file is: %n%s%n",
+     *         new String(stream.toByteArray(), StandardCharsets.UTF_8));
+     * } catch (Throwable throwable) {
+     *     System.err.printf("Downloading failed with exception. Message: %s%n", throwable.getMessage());
+     * }
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -612,7 +814,12 @@ public ShareFileDownloadResponse downloadWithResponse(OutputStream stream, Share * *

Delete the file

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.delete} + * + *
+     * fileClient.delete();
+     * System.out.println("Complete deleting the file.");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -632,7 +839,12 @@ public void delete() { * *

Delete the file

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.deleteWithResponse#duration-context} + * + *
+     * Response<Void> response = fileClient.deleteWithResponse(Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.println("Complete deleting the file with status code: " + response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -656,7 +868,14 @@ public Response deleteWithResponse(Duration timeout, Context context) { * *

Delete the file

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.deleteWithResponse#ShareRequestConditions-duration-context} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * Response<Void> response = fileClient.deleteWithResponse(requestConditions, Duration.ofSeconds(1),
+     *     new Context(key1, value1));
+     * System.out.println("Complete deleting the file with status code: " + response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -684,7 +903,12 @@ public Response deleteWithResponse(ShareRequestConditions requestCondition * *

Retrieve file properties

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.getProperties} + * + *
+     * ShareFileProperties properties = fileClient.getProperties();
+     * System.out.printf("File latest modified date is %s.", properties.getLastModified());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -704,7 +928,13 @@ public ShareFileProperties getProperties() { * *

Retrieve file properties

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.getPropertiesWithResponse#duration-context} + * + *
+     * Response<ShareFileProperties> response = fileClient.getPropertiesWithResponse(
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("File latest modified date is %s.", response.getValue().getLastModified());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -729,7 +959,14 @@ public Response getPropertiesWithResponse(Duration timeout, * *

Retrieve file properties

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.getPropertiesWithResponse#ShareRequestConditions-duration-context} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * Response<ShareFileProperties> response = fileClient.getPropertiesWithResponse(requestConditions,
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("File latest modified date is %s.", response.getValue().getLastModified());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -759,11 +996,34 @@ public Response getPropertiesWithResponse(ShareRequestCondi * *

Set the httpHeaders of contentType of "text/plain"

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.setProperties#long-ShareFileHttpHeaders-FileSmbProperties-String} + * + *
+     * ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders()
+     *     .setContentType("text/html")
+     *     .setContentEncoding("gzip")
+     *     .setContentLanguage("en")
+     *     .setCacheControl("no-transform")
+     *     .setContentDisposition("attachment");
+     * FileSmbProperties smbProperties = new FileSmbProperties()
+     *     .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY))
+     *     .setFileCreationTime(OffsetDateTime.now())
+     *     .setFileLastWriteTime(OffsetDateTime.now())
+     *     .setFilePermissionKey("filePermissionKey");
+     * String filePermission = "filePermission";
+     * // NOTE: filePermission and filePermissionKey should never be both set
+     * fileClient.setProperties(1024, httpHeaders, smbProperties, filePermission);
+     * System.out.println("Setting the file httpHeaders completed.");
+     * 
+ * * *

Clear the httpHeaders of the file and preserve the SMB properties

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.setProperties#long-ShareFileHttpHeaders-FileSmbProperties-String.clearHttpHeaderspreserveSMBProperties} + * + *
+     * ShareFileInfo response = fileClient.setProperties(1024, null, null, null);
+     * System.out.println("Setting the file httpHeaders completed.");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -791,11 +1051,36 @@ public ShareFileInfo setProperties(long newFileSize, ShareFileHttpHeaders httpHe * *

Set the httpHeaders of contentType of "text/plain"

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse#long-ShareFileHttpHeaders-FileSmbProperties-String-Duration-Context} + * + *
+     * ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders()
+     *     .setContentType("text/html")
+     *     .setContentEncoding("gzip")
+     *     .setContentLanguage("en")
+     *     .setCacheControl("no-transform")
+     *     .setContentDisposition("attachment");
+     * FileSmbProperties smbProperties = new FileSmbProperties()
+     *     .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY))
+     *     .setFileCreationTime(OffsetDateTime.now())
+     *     .setFileLastWriteTime(OffsetDateTime.now())
+     *     .setFilePermissionKey("filePermissionKey");
+     * String filePermission = "filePermission";
+     * // NOTE: filePermission and filePermissionKey should never be both set
+     * Response<ShareFileInfo> response = fileClient.setPropertiesWithResponse(1024, httpHeaders, smbProperties,
+     *     filePermission, Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Setting the file httpHeaders completed with status code %d", response.getStatusCode());
+     * 
+ * * *

Clear the httpHeaders of the file and preserve the SMB properties

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse#long-ShareFileHttpHeaders-FileSmbProperties-String-Duration-Context.clearHttpHeaderspreserveSMBProperties} + * + *
+     * Response<ShareFileInfo> response = fileClient.setPropertiesWithResponse(1024, null, null, null,
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Setting the file httpHeaders completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -827,11 +1112,38 @@ public Response setPropertiesWithResponse(long newFileSize, Share * *

Set the httpHeaders of contentType of "text/plain"

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse#long-ShareFileHttpHeaders-FileSmbProperties-String-ShareRequestConditions-Duration-Context} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders()
+     *     .setContentType("text/html")
+     *     .setContentEncoding("gzip")
+     *     .setContentLanguage("en")
+     *     .setCacheControl("no-transform")
+     *     .setContentDisposition("attachment");
+     * FileSmbProperties smbProperties = new FileSmbProperties()
+     *     .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY))
+     *     .setFileCreationTime(OffsetDateTime.now())
+     *     .setFileLastWriteTime(OffsetDateTime.now())
+     *     .setFilePermissionKey("filePermissionKey");
+     * String filePermission = "filePermission";
+     * // NOTE: filePermission and filePermissionKey should never be both set
+     * fileClient.setPropertiesWithResponse(1024, httpHeaders, smbProperties, filePermission, requestConditions, null,
+     *     null);
+     * System.out.println("Setting the file httpHeaders completed.");
+     * 
+ * * *

Clear the httpHeaders of the file and preserve the SMB properties

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse#long-ShareFileHttpHeaders-FileSmbProperties-String-ShareRequestConditions-Duration-Context.clearHttpHeaderspreserveSMBProperties} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * Response<ShareFileInfo> response = fileClient.setPropertiesWithResponse(1024, null, null, null, requestConditions,
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Setting the file httpHeaders completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -867,11 +1179,21 @@ public Response setPropertiesWithResponse(long newFileSize, Share * *

Set the metadata to "file:updatedMetadata"

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.setMetadata#map} + * + *
+     * fileClient.setMetadata(Collections.singletonMap("file", "updatedMetadata"));
+     * System.out.println("Setting the file metadata completed.");
+     * 
+ * * *

Clear the metadata of the file

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.setMetadata#map.clearMetadata} + * + *
+     * fileClient.setMetadata(null);
+     * System.out.println("Setting the file metadata completed.");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -894,11 +1216,23 @@ public ShareFileMetadataInfo setMetadata(Map metadata) { * *

Set the metadata to "file:updatedMetadata"

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse#map-duration-context} + * + *
+     * Response<ShareFileMetadataInfo> response = fileClient.setMetadataWithResponse(
+     *     Collections.singletonMap("file", "updatedMetadata"), Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Setting the file metadata completed with status code %d", response.getStatusCode());
+     * 
+ * * *

Clear the metadata of the file

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse#map-duration-context.clearMetadata} + * + *
+     * Response<ShareFileMetadataInfo> response = fileClient.setMetadataWithResponse(null,
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Setting the file metadata completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -926,11 +1260,26 @@ public Response setMetadataWithResponse(MapSet the metadata to "file:updatedMetadata"

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse#map-ShareRequestConditions-duration-context} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * Response<ShareFileMetadataInfo> response = fileClient.setMetadataWithResponse(
+     *     Collections.singletonMap("file", "updatedMetadata"), requestConditions, Duration.ofSeconds(1),
+     *     new Context(key1, value1));
+     * System.out.printf("Setting the file metadata completed with status code %d", response.getStatusCode());
+     * 
+ * * *

Clear the metadata of the file

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse#map-ShareRequestConditions-duration-context.clearMetadata} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * Response<ShareFileMetadataInfo> response = fileClient.setMetadataWithResponse(null, requestConditions,
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Setting the file metadata completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -960,7 +1309,13 @@ public Response setMetadataWithResponse(MapUpload data "default" to the file in Storage File Service.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.upload#InputStream-long} + * + *
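+     * // 'data' is assumed to be a byte[] holding the content to upload, defined elsewhere in the sample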
+     * InputStream uploadData = new ByteArrayInputStream(data);
+     * ShareFileUploadInfo response = fileClient.upload(uploadData, data.length);
+     * System.out.println("Complete uploading the data with eTag: " + response.getETag());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -990,7 +1345,15 @@ public ShareFileUploadInfo upload(InputStream data, long length) { * *

Upload data "default" starting from 1024.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadWithResponse#InputStream-long-Long-Duration-Context} + * + *
+     * InputStream uploadData = new ByteArrayInputStream(data);
+     * Response<ShareFileUploadInfo> response = fileClient.uploadWithResponse(uploadData, data.length, 0L,
+     *     Duration.ofSeconds(30), null);
+     * System.out.printf("Completed uploading the data with response %d%n.", response.getStatusCode());
+     * System.out.printf("ETag of the file is %s%n", response.getValue().getETag());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1026,7 +1389,16 @@ public Response uploadWithResponse(InputStream data, long l * *

Upload data "default" starting from 1024.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadWithResponse#InputStream-long-Long-ShareRequestConditions-Duration-Context} + * + *
+     * InputStream uploadData = new ByteArrayInputStream(data);
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * Response<ShareFileUploadInfo> response = fileClient.uploadWithResponse(uploadData, data.length, 0L,
+     *     requestConditions, Duration.ofSeconds(30), null);
+     * System.out.printf("Completed uploading the data with response %d%n.", response.getStatusCode());
+     * System.out.printf("ETag of the file is %s%n", response.getValue().getETag());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1065,7 +1437,13 @@ public Response uploadWithResponse(InputStream data, long l * *

Upload data "default" to the file in Storage File Service.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.upload#InputStream-long-ParallelTransferOptions} + * + *
+     * InputStream uploadData = new ByteArrayInputStream(data);
+     * ShareFileUploadInfo response = shareFileClient.upload(uploadData, data.length, null);
+     * System.out.println("Complete uploading the data with eTag: " + response.getETag());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1088,7 +1466,15 @@ public ShareFileUploadInfo upload(InputStream data, long length, ParallelTransfe * *

Upload data "default" to the file in Storage File Service.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadWithResponse#ShareFileUploadOptions-Duration-Context} + * + *
+     * InputStream uploadData = new ByteArrayInputStream(data);
+     * Response<ShareFileUploadInfo> response = shareFileClient.uploadWithResponse(
+     *     new ShareFileUploadOptions(uploadData, data.length), Duration.ofSeconds(30), null);
+     * System.out.printf("Completed uploading the data with response %d%n.", response.getStatusCode());
+     * System.out.printf("ETag of the file is %s%n", response.getValue().getETag());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1113,7 +1499,13 @@ public Response uploadWithResponse(ShareFileUploadOptions o * *

Upload data "default" to the file in Storage File Service.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadRange#InputStream-long} + * + *
+     * InputStream uploadData = new ByteArrayInputStream(data);
+     * ShareFileUploadInfo response = shareFileClient.uploadRange(uploadData, data.length);
+     * System.out.println("Complete uploading the data with eTag: " + response.getETag());
+     * 
+ * * *

This method does a single Put Range operation. For more information, see the * Azure Docs.

@@ -1136,7 +1528,15 @@ public ShareFileUploadInfo uploadRange(InputStream data, long length) { * *

Upload data "default" to the file in Storage File Service.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadRangeWithResponse#ShareFileUploadRangeOptions-Duration-Context} + * + *
+     * InputStream uploadData = new ByteArrayInputStream(data);
+     * Response<ShareFileUploadInfo> response = shareFileClient.uploadRangeWithResponse(
+     *     new ShareFileUploadRangeOptions(uploadData, data.length), Duration.ofSeconds(30), null);
+     * System.out.printf("Completed uploading the data with response %d%n.", response.getStatusCode());
+     * System.out.printf("ETag of the file is %s%n", response.getValue().getETag());
+     * 
+ * * *

This method does a single Put Range operation. For more information, see the * Azure Docs.

@@ -1162,7 +1562,12 @@ public Response uploadRangeWithResponse(ShareFileUploadRang * *

Upload a number of bytes from a file at defined source and destination offsets

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrl#long-long-long-String} + * + *
+     * ShareFileUploadRangeFromUrlInfo response = fileClient.uploadRangeFromUrl(6, 8, 0, "sourceUrl");
+     * System.out.println("Completed upload range from url!");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1188,7 +1593,13 @@ public ShareFileUploadRangeFromUrlInfo uploadRangeFromUrl(long length, long dest * *

Upload a number of bytes from a file at defined source and destination offsets

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse#long-long-long-String-Duration-Context} + * + *
+     * Response<ShareFileUploadRangeFromUrlInfo> response =
+     *     fileClient.uploadRangeFromUrlWithResponse(6, 8, 0, "sourceUrl", Duration.ofSeconds(1), Context.NONE);
+     * System.out.println("Completed upload range from url!");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1219,7 +1630,14 @@ public Response uploadRangeFromUrlWithResponse( * *

Upload a number of bytes from a file at defined source and destination offsets

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse#long-long-long-String-ShareRequestConditions-Duration-Context} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * Response<ShareFileUploadRangeFromUrlInfo> response = fileClient.uploadRangeFromUrlWithResponse(6, 8, 0,
+     *     "sourceUrl", requestConditions, Duration.ofSeconds(1), Context.NONE);
+     * System.out.println("Completed upload range from url!");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1253,7 +1671,14 @@ public Response uploadRangeFromUrlWithResponse( * *

Upload a number of bytes from a file at defined source and destination offsets

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse#ShareFileUploadRangeFromUrlOptions-Duration-Context} + * + *
+     * Response<ShareFileUploadRangeFromUrlInfo> response =
+     *     fileClient.uploadRangeFromUrlWithResponse(new ShareFileUploadRangeFromUrlOptions(6, "sourceUrl")
+     *         .setDestinationOffset(8), Duration.ofSeconds(1), Context.NONE);
+     * System.out.println("Completed upload range from url!");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1283,7 +1708,12 @@ public Response uploadRangeFromUrlWithResponse( * *

Clears the first 1024 bytes.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.clearRange#long} + * + *
+     * ShareFileUploadInfo response = fileClient.clearRange(1024);
+     * System.out.println("Complete clearing the range with eTag: " + response.getETag());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1304,7 +1734,13 @@ public ShareFileUploadInfo clearRange(long length) { * *

Clear the range starting from 1024 with length of 1024.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.clearRangeWithResponse#long-long-Duration-Context} + * + *
+     * Response<ShareFileUploadInfo> response = fileClient.clearRangeWithResponse(1024, 1024,
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.println("Complete clearing the range with status code: " + response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1332,7 +1768,14 @@ public Response clearRangeWithResponse(long length, long of * *

Clear the range starting from 1024 with length of 1024.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.clearRangeWithResponse#long-long-ShareRequestConditions-Duration-Context} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * Response<ShareFileUploadInfo> response = fileClient.clearRangeWithResponse(1024, 1024, requestConditions,
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.println("Complete clearing the range with status code: " + response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1362,7 +1805,11 @@ public Response clearRangeWithResponse(long length, long of * *

Upload the file from the source file path.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadFromFile#string} + * + *
+     * fileClient.uploadFromFile("someFilePath");
+     * 
+ * * *

For more information, see the * Azure Docs Create File @@ -1383,7 +1830,12 @@ public void uploadFromFile(String uploadFilePath) { * *

Upload the file from the source file path.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadFromFile#string-ShareRequestConditions} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * fileClient.uploadFromFile("someFilePath", requestConditions);
+     * 
+ * * *

For more information, see the * Azure Docs Create File @@ -1405,7 +1857,13 @@ public void uploadFromFile(String uploadFilePath, ShareRequestConditions request * *

List all ranges for the file client.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.listRanges} + * + *
+     * Iterable<ShareFileRange> ranges = fileClient.listRanges();
+     * ranges.forEach(range ->
+     *     System.out.printf("List ranges completed with start: %d, end: %d", range.getStart(), range.getEnd()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1424,7 +1882,14 @@ public PagedIterable listRanges() { * *

List all ranges within the file range from 1KB to 2KB.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.listRanges#ShareFileRange-Duration-Context} + * + *
+     * Iterable<ShareFileRange> ranges = fileClient.listRanges(new ShareFileRange(1024, 2048L), Duration.ofSeconds(1),
+     *     new Context(key1, value1));
+     * ranges.forEach(range ->
+     *     System.out.printf("List ranges completed with start: %d, end: %d", range.getStart(), range.getEnd()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1448,7 +1913,15 @@ public PagedIterable listRanges(ShareFileRange range, Duration t * *

List all ranges within the file range from 1KB to 2KB.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.listRanges#ShareFileRange-ShareRequestConditions-Duration-Context} + * + *
+     * ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
+     * Iterable<ShareFileRange> ranges = fileClient.listRanges(new ShareFileRange(1024, 2048L), requestConditions,
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * ranges.forEach(range ->
+     *     System.out.printf("List ranges completed with start: %d, end: %d", range.getStart(), range.getEnd()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1475,7 +1948,15 @@ public PagedIterable listRanges(ShareFileRange range, ShareReque * *

List all ranges within the file range from 1KB to 2KB.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.listRangesDiff#String} + * + *
+     * ShareFileRangeList rangeList = fileClient.listRangesDiff("previoussnapshot");
+     * System.out.println("Valid Share File Ranges are:");
+     * for (FileRange range : rangeList.getRanges()) {
+     *     System.out.printf("Start: %s, End: %s%n", range.getStart(), range.getEnd());
+     * }
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1499,7 +1980,17 @@ public ShareFileRangeList listRangesDiff(String previousSnapshot) { * *

List all ranges within the file range from 1KB to 2KB.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.listRangesDiffWithResponse#ShareFileListRangesDiffOptions-Duration-Context} + * + *
+     * ShareFileRangeList rangeList = fileClient.listRangesDiffWithResponse(
+     *     new ShareFileListRangesDiffOptions("previoussnapshot")
+     *     .setRange(new ShareFileRange(1024, 2048L)), Duration.ofSeconds(1), new Context(key1, value1)).getValue();
+     * System.out.println("Valid Share File Ranges are:");
+     * for (FileRange range : rangeList.getRanges()) {
+     *     System.out.printf("Start: %s, End: %s%n", range.getStart(), range.getEnd());
+     * }
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1528,7 +2019,13 @@ public Response listRangesDiffWithResponse(ShareFileListRang * *

List all handles for the file client.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.listHandles} + * + *
+     * fileClient.listHandles()
+     *     .forEach(handleItem -> System.out.printf("List handles completed with handleId %s",
+     *         handleItem.getHandleId()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1547,7 +2044,13 @@ public PagedIterable listHandles() { * *

List 10 handles for the file client.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.listHandles#integer-duration-context} + * + *
+     * fileClient.listHandles(10, Duration.ofSeconds(1), new Context(key1, value1))
+     *     .forEach(handleItem -> System.out.printf("List handles completed with handleId %s",
+     *         handleItem.getHandleId()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1572,7 +2075,14 @@ public PagedIterable listHandles(Integer maxResultsPerPage, Duration * *

Force close handles returned by list handles.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.forceCloseHandle#String} + * + *
+     * fileClient.listHandles().forEach(handleItem -> {
+     *     fileClient.forceCloseHandle(handleItem.getHandleId());
+     *     System.out.printf("Closed handle %s on resource %s%n", handleItem.getHandleId(), handleItem.getPath());
+     * });
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1592,7 +2102,16 @@ public CloseHandlesInfo forceCloseHandle(String handleId) { * *

Force close handles returned by list handles.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.forceCloseHandleWithResponse#String} + * + *
+     * fileClient.listHandles().forEach(handleItem -> {
+     *     Response<CloseHandlesInfo> closeResponse = fileClient
+     *         .forceCloseHandleWithResponse(handleItem.getHandleId(), Duration.ofSeconds(30), Context.NONE);
+     *     System.out.printf("Closing handle %s on resource %s completed with status code %d%n",
+     *         handleItem.getHandleId(), handleItem.getPath(), closeResponse.getStatusCode());
+     * });
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1617,7 +2136,13 @@ public Response forceCloseHandleWithResponse(String handleId, * *

Force close all handles.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.forceCloseAllHandles#Duration-Context} + * + *
+     * CloseHandlesInfo closeHandlesInfo = fileClient.forceCloseAllHandles(Duration.ofSeconds(30), Context.NONE);
+     * System.out.printf("Closed %d open handles on the file%n", closeHandlesInfo.getClosedHandles());
+     * System.out.printf("Failed to close %d open handles on the file%n", closeHandlesInfo.getFailedHandles());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1642,7 +2167,20 @@ public CloseHandlesInfo forceCloseAllHandles(Duration timeout, Context context) * *

Get the share snapshot id.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.getShareSnapshotId} + * + *
+     * OffsetDateTime currentTime = OffsetDateTime.of(LocalDateTime.now(), ZoneOffset.UTC);
+     * ShareFileClient fileClient = new ShareFileClientBuilder()
+     *     .endpoint("https://${accountName}.file.core.windows.net")
+     *     .sasToken("${SASToken}")
+     *     .shareName("myshare")
+     *     .resourcePath("myfile")
+     *     .snapshot(currentTime.toString())
+     *     .buildFileClient();
+     *
+     * System.out.printf("Snapshot ID: %s%n", fileClient.getShareSnapshotId());
+     * 
+ * * * @return The snapshot id which is a unique {@code DateTime} value that identifies the share snapshot to its base * share. @@ -1656,7 +2194,12 @@ public String getShareSnapshotId() { * *

Get the share name.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.getShareName} + * + *
+     * String shareName = fileClient.getShareName();
+     * System.out.println("The share name of the directory is " + shareName);
+     * 
+ * * * @return The share name of the file. */ @@ -1669,7 +2212,12 @@ public String getShareName() { * *

Get the file path.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.getFilePath} + * + *
+     * String filePath = fileClient.getFilePath();
+     * System.out.println("The name of the file is " + filePath);
+     * 
+ * * * @return The path of the file. */ @@ -1703,7 +2251,17 @@ public HttpPipeline getHttpPipeline() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.generateSas#ShareServiceSasSignatureValues} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * ShareFileSasPermission permission = new ShareFileSasPermission().setReadPermission(true);
+     *
+     * ShareServiceSasSignatureValues values = new ShareServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * shareFileClient.generateSas(values); // Client must be authenticated via StorageSharedKeyCredential
+     * 
+ * * * @param shareServiceSasSignatureValues {@link ShareServiceSasSignatureValues} * @@ -1720,7 +2278,18 @@ public String generateSas(ShareServiceSasSignatureValues shareServiceSasSignatur * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.generateSas#ShareServiceSasSignatureValues-Context} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * ShareFileSasPermission permission = new ShareFileSasPermission().setReadPermission(true);
+     *
+     * ShareServiceSasSignatureValues values = new ShareServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * shareFileClient.generateSas(values, new Context("key", "value"));
+     * 
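The returned SAS can then authorize a separate, otherwise unauthenticated client. As an illustrative sketch (reusing {@code values} and {@code shareFileClient} from the snippet above; the new variable names are assumptions):
<pre>
String sas = shareFileClient.generateSas(values, new Context("key", "value"));

// The SAS authorizes a new client against the same file without a shared key credential.
ShareFileClient sasAuthorizedClient = new ShareFileClientBuilder()
    .endpoint(shareFileClient.getFileUrl())
    .sasToken(sas)
    .buildFileClient();
</pre>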
+ * * * @param shareServiceSasSignatureValues {@link ShareServiceSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. diff --git a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareFileClientBuilder.java b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareFileClientBuilder.java index 8bc4b05e954fe..d1a290efe67c1 100644 --- a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareFileClientBuilder.java +++ b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareFileClientBuilder.java @@ -54,17 +54,51 @@ * {@link #sasToken(String) SAS token} that authorizes the client.

* *

Instantiating a synchronous File Client with SAS token

- * {@codesnippet com.azure.storage.file.share.ShareFileClient.instantiation.sastoken} + * + *
+ * ShareFileClient fileClient = new ShareFileClientBuilder()
+ *     .endpoint("https://${accountName}.file.core.windows.net?${SASToken}")
+ *     .shareName("myshare")
+ *     .resourcePath("myfilepath")
+ *     .buildFileClient();
+ * 
+ * * *

Instantiating a synchronous Directory Client with SAS token

- * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.instantiation.sastoken} + * + *
+ * ShareDirectoryClient shareDirectoryClient = new ShareFileClientBuilder()
+ *     .endpoint("https://${accountName}.file.core.windows.net?${SASToken}")
+ *     .shareName("myshare")
+ *     .resourcePath("mydirectory")
+ *     .buildDirectoryClient();
+ * 
+ * * *

If the {@code endpoint} doesn't contain the query parameters to construct a SAS token it may be set using * {@link #sasToken(String) sasToken}.

* - * {@codesnippet com.azure.storage.file.share.ShareFileClient.instantiation.credential} + * + *
+ * ShareFileClient fileClient = new ShareFileClientBuilder()
+ *     .endpoint("https://${accountName}.file.core.windows.net")
+ *     .sasToken("${SASTokenQueryParams}")
+ *     .shareName("myshare")
+ *     .resourcePath("myfilepath")
+ *     .buildFileClient();
+ * 
+ * * - * {@codesnippet com.azure.storage.file.share.ShareFileAsyncClient.instantiation.credential} + * + *
+ * ShareFileAsyncClient shareFileAsyncClient = new ShareFileClientBuilder()
+ *     .endpoint("https://{accountName}.file.core.windows.net")
+ *     .sasToken("${SASTokenQueryParams}")
+ *     .shareName("myshare")
+ *     .resourcePath("myfilepath")
+ *     .buildFileAsyncClient();
+ * 
+ * * *

Another way to authenticate the client is using a {@link StorageSharedKeyCredential}. To create a * StorageSharedKeyCredential a connection string from the Storage File service must be used. @@ -73,10 +107,28 @@ * when authorizing requests sent to the service.

* *

Instantiating a synchronous Directory Client with connection string.

- * {@codesnippet com.azure.storage.file.share.ShareDirectoryClient.instantiation.connectionstring} + * + *
+ * String connectionString = "DefaultEndpointsProtocol=https;AccountName={name};AccountKey={key}"
+ *     + ";EndpointSuffix={core.windows.net}";
+ * ShareDirectoryClient shareDirectoryClient = new ShareFileClientBuilder()
+ *     .connectionString(connectionString)
+ *     .shareName("myshare")
+ *     .resourcePath("mydirectory")
+ *     .buildDirectoryClient();
+ * 
+ * * *

Instantiating an Asynchronous Directory Client with connection string.

- * {@codesnippet com.azure.storage.file.share.ShareDirectoryAsyncClient.instantiation.connectionstring} + * + *
+ * String connectionString = "DefaultEndpointsProtocol=https;AccountName={name};AccountKey={key};"
+ *     + "EndpointSuffix={core.windows.net}";
+ * ShareDirectoryAsyncClient shareDirectoryAsyncClient = new ShareFileClientBuilder()
+ *     .connectionString(connectionString).shareName("myshare").resourcePath("mydirectory")
+ *     .buildDirectoryAsyncClient();
+ * 
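When the account key is available, a {@link StorageSharedKeyCredential} could also be supplied to the builder directly instead of a connection string. A minimal sketch, assuming placeholder account name and key:
<pre>
StorageSharedKeyCredential credential = new StorageSharedKeyCredential("{accountName}", "{accountKey}");
ShareFileClient fileClient = new ShareFileClientBuilder()
    .endpoint("https://{accountName}.file.core.windows.net")
    .credential(credential)
    .shareName("myshare")
    .resourcePath("myfilepath")
    .buildFileClient();
</pre>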
+ * * * @see ShareFileClient * @see ShareFileAsyncClient diff --git a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareServiceAsyncClient.java b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareServiceAsyncClient.java index adbb7a293a3eb..a1415dd1a2f12 100644 --- a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareServiceAsyncClient.java +++ b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareServiceAsyncClient.java @@ -55,7 +55,14 @@ * *

Instantiating an Asynchronous File Service Client

* - * {@codesnippet com.azure.storage.file.share.ShareServiceAsyncClient.instantiation} + * + *
+ * ShareServiceAsyncClient client = new ShareServiceClientBuilder()
+ *     .connectionString("${connectionString}")
+ *     .endpoint("${endpoint}")
+ *     .buildAsyncClient();
+ * 
+ * * *

View {@link ShareServiceClientBuilder this} for additional ways to construct the client.

* @@ -136,7 +143,15 @@ public ShareAsyncClient getShareAsyncClient(String shareName, String snapshot) { * *

List all shares in the account

* - * {@codesnippet com.azure.storage.file.share.ShareServiceAsyncClient.listShares} + * + *
+     * fileServiceAsyncClient.listShares().subscribe(
+     *     shareItem -> System.out.printf("Share %s exists in the account", shareItem.getName()),
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete listing the shares!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -170,11 +185,28 @@ public PagedFlux listShares() { * *

List all shares that begin with "azure"

* - * {@codesnippet com.azure.storage.file.share.ShareServiceAsyncClient.listShares#ListSharesOptions.prefix} + * + *
+     * fileServiceAsyncClient.listShares(new ListSharesOptions().setPrefix("azure")).subscribe(
+     *     shareItem -> System.out.printf("Share %s exists in the account", shareItem.getName()),
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete listing the shares!")
+     * );
+     * 
+ * * *

List all shares including their snapshots and metadata

* - * {@codesnippet com.azure.storage.file.share.ShareServiceAsyncClient.listShares#ListSharesOptions.metadata.snapshot} + * + *
+     * fileServiceAsyncClient.listShares(new ListSharesOptions().setIncludeMetadata(true).setIncludeSnapshots(true))
+     *     .subscribe(
+     *         shareItem -> System.out.printf("Share %s exists in the account", shareItem.getName()),
+     *         error -> System.err.print(error.toString()),
+     *         () -> System.out.println("Complete listing the shares!")
+     *     );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -249,7 +281,15 @@ PagedFlux listSharesWithOptionalTimeout(String marker, ListSharesOpti * *

Retrieve File service properties

* - * {@codesnippet com.azure.storage.file.share.ShareServiceAsyncClient.getProperties} + * + *
+     * fileServiceAsyncClient.getProperties()
+     *     .subscribe(properties -> {
+     *         System.out.printf("Hour metrics enabled: %b, Minute metrics enabled: %b",
+     *             properties.getHourMetrics().isEnabled(), properties.getMinuteMetrics().isEnabled());
+     *     });
+     * 
+ * * *

For more information, see the * Azure @@ -274,7 +314,14 @@ public Mono getProperties() { * *

Retrieve File service properties

* - * {@codesnippet com.azure.storage.file.share.ShareServiceAsyncClient.getPropertiesWithResponse} + * + *
+     * fileServiceAsyncClient.getPropertiesWithResponse()
+     *     .subscribe(properties -> System.out.printf("Hour metrics enabled: %b, Minute metrics enabled: %b",
+     *         properties.getValue().getHourMetrics().isEnabled(),
+     *         properties.getValue().getMinuteMetrics().isEnabled()));
+     * 
+ * * *

For more information, see the * Azure @@ -310,7 +357,17 @@ Mono> getPropertiesWithResponse(Context context * *

Enable Minute and Hour Metrics

* - * {@codesnippet com.azure.storage.file.share.ShareServiceAsyncClient.setProperties#fileServiceProperties} + * + *
+     * fileServiceAsyncClient.getProperties().subscribe(properties -> {
+     *     properties.getMinuteMetrics().setEnabled(true);
+     *     properties.getHourMetrics().setEnabled(true);
+     *
+     *     fileServiceAsyncClient.setProperties(properties)
+     *         .subscribe(r -> System.out.println("Setting File service properties completed."));
+     * });
+     * 
+ * * *

For more information, see the * Azure @@ -352,11 +409,32 @@ public Mono setProperties(ShareServiceProperties properties) { * *

Clear CORS in the File service

* - * {@codesnippet com.azure.storage.file.share.ShareServiceAsyncClient.setPropertiesWithResponse#fileServiceProperties.clearCORS} + * + *
+     * fileServiceAsyncClient.getProperties().subscribe(properties -> {
+     *     properties.setCors(Collections.emptyList());
+     *
+     *     fileServiceAsyncClient.setPropertiesWithResponse(properties).subscribe(response ->
+     *         System.out.printf("Setting File service properties completed with status code %d",
+     *             response.getStatusCode()));
+     * });
+     * 
+ * * *

Enable Minute and Hour Metrics

* - * {@codesnippet com.azure.storage.file.share.ShareServiceAsyncClient.setPropertiesWithResponseAsync#fileServiceProperties} + * + *
+     * fileServiceAsyncClient.getPropertiesWithResponse().subscribe(response -> {
+     *     ShareServiceProperties properties = response.getValue();
+     *     properties.getMinuteMetrics().setEnabled(true);
+     *     properties.getHourMetrics().setEnabled(true);
+     *
+     *     fileServiceAsyncClient.setPropertiesWithResponse(properties).subscribe(r ->
+     *         System.out.printf("Setting File service properties completed with status code %d", r.getStatusCode()));
+     * });
+     * 
+ * * *

For more information, see the * Azure @@ -401,7 +479,15 @@ Mono> setPropertiesWithResponse(ShareServiceProperties properties * *

Create the share "myshare"

* - * {@codesnippet com.azure.storage.file.share.ShareServiceAsyncClient.createShare#string} + * + *
+     * fileServiceAsyncClient.createShare("myshare").subscribe(
+     *     response -> { },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete creating the share!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -427,11 +513,30 @@ public Mono createShare(String shareName) { * *

Create the share "test" with metadata "share:metadata"

* - * {@codesnippet com.azure.storage.file.share.ShareServiceAsyncClient.createShareWithResponse#string-map-integer.metadata} + * + *
+     * fileServiceAsyncClient.createShareWithResponse("test", Collections.singletonMap("share", "metadata"), null)
+     *     .subscribe(
+     *         response -> System.out.printf("Creating the share completed with status code %d", response.getStatusCode()),
+     *         error -> System.err.print(error.toString()),
+     *         () -> System.out.println("Complete creating the share!")
+     *     );
+     * 
+ * * *

Create the share "test" with a quota of 10 GB

* - * {@codesnippet com.azure.storage.file.share.ShareServiceAsyncClient.createShareWithResponse#string-map-integer.quota} + * + *
+     * fileServiceAsyncClient.createShareWithResponse("test", null, 10)
+     *     .subscribe(
+     *         response -> System.out.printf("Creating the share completed with status code %d",
+     *             response.getStatusCode()),
+     *         error -> System.err.print(error.toString()),
+     *         () -> System.out.println("Complete creating the share!")
+     *     );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -461,7 +566,18 @@ public Mono> createShareWithResponse(String shareName * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareServiceAsyncClient.createShareWithResponse#String-ShareCreateOptions} + * + *
+     * fileServiceAsyncClient.createShareWithResponse("test", new ShareCreateOptions()
+     *     .setMetadata(Collections.singletonMap("share", "metadata")).setQuotaInGb(1)
+     *     .setAccessTier(ShareAccessTier.HOT)).subscribe(
+     *         response -> System.out.printf("Creating the share completed with status code %d",
+     *             response.getStatusCode()),
+     *         error -> System.err.print(error.toString()),
+     *         () -> System.out.println("Complete creating the share!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -497,7 +613,13 @@ Mono> createShareWithResponse(String shareName, Share * *

Delete the share "test"

* - * {@codesnippet com.azure.storage.file.share.ShareServiceAsyncClient.deleteShare#string} + * + *
+     * fileServiceAsyncClient.deleteShare("test").doOnSuccess(
+     *     response -> System.out.println("Deleting the share completed.")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -523,7 +645,14 @@ public Mono deleteShare(String shareName) { * *

Delete the snapshot of share "test" that was created at the current time.

* - * {@codesnippet com.azure.storage.file.share.ShareServiceAsyncClient.deleteShareWithResponse#string-string} + * + *
+     * OffsetDateTime midnight = OffsetDateTime.of(LocalDateTime.now(), ZoneOffset.UTC);
+     * fileServiceAsyncClient.deleteShareWithResponse("test", midnight.toString())
+     *     .subscribe(response -> System.out.printf("Deleting the snapshot completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -579,7 +708,22 @@ public HttpPipeline getHttpPipeline() { * *

The snippet below generates a SAS that lasts for two days and gives the user read and list access to blob * containers and file shares.

- * {@codesnippet com.azure.storage.file.share.ShareServiceAsyncClient.generateAccountSas#AccountSasSignatureValues} + * + *
+     * AccountSasPermission permissions = new AccountSasPermission()
+     *     .setListPermission(true)
+     *     .setReadPermission(true);
+     * AccountSasResourceType resourceTypes = new AccountSasResourceType().setContainer(true);
+     * AccountSasService services = new AccountSasService().setBlobAccess(true).setFileAccess(true);
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plus(Duration.ofDays(2));
+     *
+     * AccountSasSignatureValues sasValues =
+     *     new AccountSasSignatureValues(expiryTime, permissions, services, resourceTypes);
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * String sas = fileServiceAsyncClient.generateAccountSas(sasValues);
+     * 
+ * * * @param accountSasSignatureValues {@link AccountSasSignatureValues} * @@ -596,7 +740,22 @@ public String generateAccountSas(AccountSasSignatureValues accountSasSignatureVa * *

The snippet below generates a SAS that lasts for two days and gives the user read and list access to blob * containers and file shares.

- * {@codesnippet com.azure.storage.file.share.ShareServiceAsyncClient.generateAccountSas#AccountSasSignatureValues-Context} + * + *
+     * AccountSasPermission permissions = new AccountSasPermission()
+     *     .setListPermission(true)
+     *     .setReadPermission(true);
+     * AccountSasResourceType resourceTypes = new AccountSasResourceType().setContainer(true);
+     * AccountSasService services = new AccountSasService().setBlobAccess(true).setFileAccess(true);
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plus(Duration.ofDays(2));
+     *
+     * AccountSasSignatureValues sasValues =
+     *     new AccountSasSignatureValues(expiryTime, permissions, services, resourceTypes);
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * String sas = fileServiceAsyncClient.generateAccountSas(sasValues, new Context("key", "value"));
+     * 
+ * * * @param accountSasSignatureValues {@link AccountSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. @@ -623,7 +782,19 @@ public String generateAccountSas(AccountSasSignatureValues accountSasSignatureVa * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareServiceAsyncClient.undeleteShare#String-String} + * + *
+     * ListSharesOptions listSharesOptions = new ListSharesOptions();
+     * listSharesOptions.setIncludeDeleted(true);
+     * fileServiceAsyncClient.listShares(listSharesOptions).flatMap(
+     *     deletedShare -> {
+     *         Mono<ShareAsyncClient> shareAsyncClient = fileServiceAsyncClient.undeleteShare(
+     *             deletedShare.getName(), deletedShare.getVersion());
+     *         return shareAsyncClient;
+     *     }
+     * ).blockFirst();
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -654,7 +825,19 @@ public Mono undeleteShare( * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareServiceAsyncClient.undeleteShareWithResponse#String-String} + * + *
+     * ListSharesOptions listSharesOptions = new ListSharesOptions();
+     * listSharesOptions.setIncludeDeleted(true);
+     * fileServiceAsyncClient.listShares(listSharesOptions).flatMap(
+     *     deletedShare -> {
+     *         Mono<ShareAsyncClient> shareAsyncClient = fileServiceAsyncClient.undeleteShareWithResponse(
+     *             deletedShare.getName(), deletedShare.getVersion()).map(Response::getValue);
+     *         return shareAsyncClient;
+     *     }
+     * ).blockFirst();
+     * 
+ * * *

For more information, see the * Azure Docs.

diff --git a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareServiceClient.java b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareServiceClient.java index a7c7e27ed2868..493cdc5587541 100644 --- a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareServiceClient.java +++ b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareServiceClient.java @@ -32,7 +32,14 @@ * *

Instantiating a Synchronous File Service Client

* - * {@codesnippet com.azure.storage.file.share.ShareServiceClient.instantiation} + * + *
+ * ShareServiceClient client = new ShareServiceClientBuilder()
+ *     .connectionString("${connectionString}")
+ *     .endpoint("${endpoint}")
+ *     .buildClient();
+ * 
+ * * *

View {@link ShareServiceClientBuilder this} for additional ways to construct the client.

* @@ -92,7 +99,13 @@ public ShareClient getShareClient(String shareName) { * *

List all shares in the account

* - * {@codesnippet com.azure.storage.file.share.ShareServiceClient.listShares} + * + *
+     * fileServiceClient.listShares().forEach(
+     *     shareItem -> System.out.printf("Share %s exists in the account", shareItem.getName())
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -122,11 +135,25 @@ public PagedIterable listShares() { * *

List all shares that begin with "azure"

* - * {@codesnippet ShareServiceClient.listShares#ListSharesOptions-Duration-Context1} + * + *
+     * fileServiceClient.listShares(new ListSharesOptions().setPrefix("azure"), Duration.ofSeconds(1),
+     *     new Context(key1, value1)).forEach(
+     *         shareItem -> System.out.printf("Share %s exists in the account", shareItem.getName())
+     * );
+     * 
+ * * *

List all shares including their snapshots and metadata

* - * {@codesnippet ShareServiceClient.listShares#ListSharesOptions-Duration-Context2} + * + *
+     * fileServiceClient.listShares(new ListSharesOptions().setIncludeMetadata(true)
+     *     .setIncludeSnapshots(true), Duration.ofSeconds(1), new Context(key1, value1)).forEach(
+     *         shareItem -> System.out.printf("Share %s exists in the account", shareItem.getName())
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -153,7 +180,13 @@ public PagedIterable listShares(ListSharesOptions options, Duration t * *

Retrieve File service properties

* - * {@codesnippet com.azure.storage.file.share.ShareServiceClient.getProperties} + * + *
+     * ShareServiceProperties properties = fileServiceClient.getProperties();
+     * System.out.printf("Hour metrics enabled: %b, Minute metrics enabled: %b", properties.getHourMetrics().isEnabled(),
+     *     properties.getMinuteMetrics().isEnabled());
+     * 
+ * * *

For more information, see the * Azure @@ -174,7 +207,14 @@ public ShareServiceProperties getProperties() { * *

Retrieve File service properties

* - * {@codesnippet com.azure.storage.file.share.ShareServiceClient.getPropertiesWithResponse#duration-context} + * + *
+     * ShareServiceProperties properties = fileServiceClient.getPropertiesWithResponse(
+     *     Duration.ofSeconds(1), new Context(key1, value1)).getValue();
+     * System.out.printf("Hour metrics enabled: %b, Minute metrics enabled: %b", properties.getHourMetrics().isEnabled(),
+     *     properties.getMinuteMetrics().isEnabled());
+     * 
+ * * *

For more information, see the * Azure @@ -205,11 +245,30 @@ public Response getPropertiesWithResponse(Duration timeo * *

Clear CORS in the File service

* - * {@codesnippet com.azure.storage.file.share.ShareServiceClient.setPropertiesWithResponse#fileServiceProperties-Context.clearCORS} + * + *
+     * ShareServiceProperties properties = fileServiceClient.getProperties();
+     * properties.setCors(Collections.emptyList());
+     *
+     * Response<Void> response = fileServiceClient.setPropertiesWithResponse(properties,
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Setting File service properties completed with status code %d", response.getStatusCode());
+     * 
+ * * *

Enable Minute and Hour Metrics

* - * {@codesnippet com.azure.storage.file.share.ShareServiceClient.setProperties#fileServiceProperties} + * + *
+     * ShareServiceProperties properties = fileServiceClient.getProperties();
+     *
+     * properties.getMinuteMetrics().setEnabled(true);
+     * properties.getHourMetrics().setEnabled(true);
+     *
+     * fileServiceClient.setProperties(properties);
+     * System.out.println("Setting File service properties completed.");
+     * 
+ * * *

For more information, see the * Azure @@ -246,11 +305,32 @@ public void setProperties(ShareServiceProperties properties) { * *

Clear CORS in the File service

* - * {@codesnippet com.azure.storage.file.share.ShareServiceClient.setPropertiesWithResponse#fileServiceProperties-Context.clearCORS} + * + *
+     * ShareServiceProperties properties = fileServiceClient.getProperties();
+     * properties.setCors(Collections.emptyList());
+     *
+     * Response<Void> response = fileServiceClient.setPropertiesWithResponse(properties,
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Setting File service properties completed with status code %d", response.getStatusCode());
+     * 
+ * * *

Enable Minute and Hour Metrics

* - * {@codesnippet com.azure.storage.file.share.ShareServiceClient.setPropertiesWithResponse#fileServiceProperties-Context} + * + *
+     * ShareServiceProperties properties = fileServiceClient.getPropertiesWithResponse(
+     *     Duration.ofSeconds(1), new Context(key1, value1)).getValue();
+     *
+     * properties.getMinuteMetrics().setEnabled(true);
+     * properties.getHourMetrics().setEnabled(true);
+     *
+     * Response<Void> response = fileServiceClient.setPropertiesWithResponse(properties,
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Setting File service properties completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure @@ -289,7 +369,12 @@ public Response setPropertiesWithResponse(ShareServiceProperties propertie * *

Create the share with share name of "myshare"

* - * {@codesnippet com.azure.storage.file.share.ShareServiceClient.createShare#string} + * + *
+     * fileServiceClient.createShare("myshare");
+     * System.out.println("Creating the share completed.");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -311,7 +396,14 @@ public ShareClient createShare(String shareName) { * *

Create the share "test" with a quota of 10 GB

* - * {@codesnippet ShareServiceClient.createShareWithResponse#string-map-integer-duration-context} + * + *
+     * Response<ShareClient> response = fileServiceClient.createShareWithResponse("test",
+     *     Collections.singletonMap("share", "metadata"), null, Duration.ofSeconds(5),
+     *     new Context(key1, value1));
+     * System.out.printf("Creating the share completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -341,7 +433,14 @@ public Response createShareWithResponse(String shareName, MapCode Samples

* - * {@codesnippet ShareServiceClient.createShareWithResponse#String-ShareCreateOptions-Duration-Context} + * + *
+     * Response<ShareClient> response = fileServiceClient.createShareWithResponse("test",
+     *     new ShareCreateOptions().setMetadata(Collections.singletonMap("share", "metadata")).setQuotaInGb(1)
+     *     .setAccessTier(ShareAccessTier.HOT), Duration.ofSeconds(5), new Context(key1, value1));
+     * System.out.printf("Creating the share completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -370,7 +469,11 @@ public Response createShareWithResponse(String shareName, ShareCrea * *

Delete the share "myshare"

* - * {@codesnippet com.azure.storage.file.share.ShareServiceClient.deleteShare#string} + * + *
+     * fileServiceClient.deleteShare("myshare");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -391,7 +494,14 @@ public void deleteShare(String shareName) { * *

Delete the snapshot of share "test" that was created at the current time.

* - * {@codesnippet com.azure.storage.file.share.ShareServiceClient.deleteShareWithResponse#string-string-duration-context} + * + *
+     * OffsetDateTime midnight = OffsetDateTime.of(LocalDateTime.now(), ZoneOffset.UTC);
+     * Response<Void> response = fileServiceClient.deleteShareWithResponse("test", midnight.toString(),
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Deleting the snapshot completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -439,7 +549,22 @@ public HttpPipeline getHttpPipeline() { *

Generating an account SAS

*

The snippet below generates an AccountSasSignatureValues object that lasts for two days and gives the user * read and list access to blob and file shares.

- * {@codesnippet com.azure.storage.file.share.ShareServiceClient.generateAccountSas#AccountSasSignatureValues} + * + *
+     * AccountSasPermission permissions = new AccountSasPermission()
+     *     .setListPermission(true)
+     *     .setReadPermission(true);
+     * AccountSasResourceType resourceTypes = new AccountSasResourceType().setContainer(true);
+     * AccountSasService services = new AccountSasService().setBlobAccess(true).setFileAccess(true);
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plus(Duration.ofDays(2));
+     *
+     * AccountSasSignatureValues sasValues =
+     *     new AccountSasSignatureValues(expiryTime, permissions, services, resourceTypes);
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * String sas = fileServiceClient.generateAccountSas(sasValues);
+     * 
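The account SAS produced here could then be handed to a client that is not authenticated with a shared key. An illustrative sketch, reusing {@code sasValues} and {@code fileServiceClient} from the snippet above (the new variable name is an assumption):
<pre>
String sas = fileServiceClient.generateAccountSas(sasValues);

// The account SAS authorizes a new service client for the same account.
ShareServiceClient sasAuthorizedServiceClient = new ShareServiceClientBuilder()
    .endpoint(fileServiceClient.getFileServiceUrl())
    .sasToken(sas)
    .buildClient();
</pre>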
+ * * * @param accountSasSignatureValues {@link AccountSasSignatureValues} * @@ -456,7 +581,22 @@ public String generateAccountSas(AccountSasSignatureValues accountSasSignatureVa * *

The snippet below generates a SAS that lasts for two days and gives the user read and list access to blob * containers and file shares.

- * {@codesnippet com.azure.storage.file.share.ShareServiceClient.generateAccountSas#AccountSasSignatureValues-Context} + * + *
+     * AccountSasPermission permissions = new AccountSasPermission()
+     *     .setListPermission(true)
+     *     .setReadPermission(true);
+     * AccountSasResourceType resourceTypes = new AccountSasResourceType().setContainer(true);
+     * AccountSasService services = new AccountSasService().setBlobAccess(true).setFileAccess(true);
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plus(Duration.ofDays(2));
+     *
+     * AccountSasSignatureValues sasValues =
+     *     new AccountSasSignatureValues(expiryTime, permissions, services, resourceTypes);
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * String sas = fileServiceClient.generateAccountSas(sasValues, new Context("key", "value"));
+     * 
+ * * * @param accountSasSignatureValues {@link AccountSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. @@ -482,7 +622,18 @@ public String generateAccountSas(AccountSasSignatureValues accountSasSignatureVa * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareServiceClient.undeleteShare#String-String} + * + *
+     * ListSharesOptions listSharesOptions = new ListSharesOptions();
+     * listSharesOptions.setIncludeDeleted(true);
+     * fileServiceClient.listShares(listSharesOptions, Duration.ofSeconds(1), context).forEach(
+     *     deletedShare -> {
+     *         ShareClient shareClient = fileServiceClient.undeleteShare(
+     *             deletedShare.getName(), deletedShare.getVersion());
+     *     }
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -513,7 +664,18 @@ public ShareClient undeleteShare(String deletedShareName, String deletedShareVer * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.ShareServiceClient.undeleteShareWithResponse#String-String-Duration-Context} + * + *
+     * ListSharesOptions listSharesOptions = new ListSharesOptions();
+     * listSharesOptions.setIncludeDeleted(true);
+     * fileServiceClient.listShares(listSharesOptions, Duration.ofSeconds(1), context).forEach(
+     *     deletedShare -> {
+     *         ShareClient shareClient = fileServiceClient.undeleteShareWithResponse(
+     *             deletedShare.getName(), deletedShare.getVersion(), Duration.ofSeconds(1), context).getValue();
+     *     }
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

diff --git a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareServiceClientBuilder.java b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareServiceClientBuilder.java index 886df0157c6eb..bce1cc4db9d56 100644 --- a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareServiceClientBuilder.java +++ b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/ShareServiceClientBuilder.java @@ -45,17 +45,43 @@ * SAS token that authorizes the client.

* *

Instantiating a synchronous FileService Client with SAS token

- * {@codesnippet com.azure.storage.file.share.ShareServiceClient.instantiation.sastoken} + * + *
+ * ShareServiceClient fileServiceClient = new ShareServiceClientBuilder()
+ *     .endpoint("https://${accountName}.file.core.windows.net?${SASToken}")
+ *     .buildClient();
+ * 
+ * * *

Instantiating an Asynchronous FileService Client with SAS token

- * {@codesnippet com.azure.storage.file.share.ShareServiceAsyncClient.instantiation.sastoken} + * + *
+ * ShareServiceAsyncClient fileServiceAsyncClient = new ShareServiceClientBuilder()
+ *     .endpoint("https://{accountName}.file.core.windows.net?{SASToken}")
+ *     .buildAsyncClient();
+ * 
+ * * *

If the {@code endpoint} doesn't contain the query parameters to construct a SAS token they may be set using * {@link #sasToken(String) sasToken} .

* - * {@codesnippet com.azure.storage.file.share.ShareServiceClient.instantiation.credential} + * + *
+ * ShareServiceClient fileServiceClient = new ShareServiceClientBuilder()
+ *     .endpoint("https://{accountName}.file.core.windows.net")
+ *     .sasToken("${SASTokenQueryParams}")
+ *     .buildClient();
+ * 
+ * * - * {@codesnippet com.azure.storage.file.share.ShareServiceAsyncClient.instantiation.credential} + * + *
+ * ShareServiceAsyncClient fileServiceAsyncClient = new ShareServiceClientBuilder()
+ *     .endpoint("https://{accountName}.file.core.windows.net")
+ *     .sasToken("${SASTokenQueryParams}")
+ *     .buildAsyncClient();
+ * 
+ * * *

Another way to authenticate the client is using a {@link StorageSharedKeyCredential}. To create a * StorageSharedKeyCredential a connection string from the Storage File service must be used. Set the @@ -64,10 +90,26 @@ * when authorizing requests sent to the service.

* *

Instantiating a synchronous FileService Client with connection string.

- * {@codesnippet com.azure.storage.file.share.ShareServiceClient.instantiation.connectionstring} + * + *
+ * String connectionString = "DefaultEndpointsProtocol=https;AccountName={name};AccountKey={key};"
+ *     + "EndpointSuffix={core.windows.net}";
+ * ShareServiceClient fileServiceClient = new ShareServiceClientBuilder()
+ *     .connectionString(connectionString)
+ *     .buildClient();
+ * 
+ * * *

Instantiating an Asynchronous FileService Client with connection string.

- * {@codesnippet com.azure.storage.file.share.ShareServiceAsyncClient.instantiation.connectionstring} + * + *
+ * String connectionString = "DefaultEndpointsProtocol=https;AccountName={name};AccountKey={key};"
+ *     + "EndpointSuffix={core.windows.net}";
+ * ShareServiceAsyncClient fileServiceAsyncClient = new ShareServiceClientBuilder()
+ *     .connectionString(connectionString)
+ *     .buildAsyncClient();
+ * 
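Alternatively, when the account key is known, a {@link StorageSharedKeyCredential} may be passed to the builder directly rather than through a connection string. A minimal sketch, assuming placeholder account name and key:
<pre>
StorageSharedKeyCredential credential = new StorageSharedKeyCredential("{accountName}", "{accountKey}");
ShareServiceClient fileServiceClient = new ShareServiceClientBuilder()
    .endpoint("https://{accountName}.file.core.windows.net")
    .credential(credential)
    .buildClient();
</pre>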
+ * * * @see ShareServiceClient * @see ShareServiceAsyncClient diff --git a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/specialized/ShareLeaseAsyncClient.java b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/specialized/ShareLeaseAsyncClient.java index 0fa3c40dc5a9f..98f8f9ad84505 100644 --- a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/specialized/ShareLeaseAsyncClient.java +++ b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/specialized/ShareLeaseAsyncClient.java @@ -32,7 +32,13 @@ * *

Instantiating a ShareLeaseAsyncClient

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseClientBuilder.asyncInstantiation} + * + *
+ * ShareLeaseAsyncClient fileLeaseAsyncClient = new ShareLeaseClientBuilder()
+ *     .fileAsyncClient(shareFileAsyncClient)
+ *     .buildAsyncClient();
+ * 
+ * * *

View {@link ShareLeaseClientBuilder this} for additional ways to construct the client.

* @@ -110,7 +116,11 @@ public String getLeaseId() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseAsyncClient.acquireLease} + * + *
+     * client.acquireLease().subscribe(response -> System.out.printf("Lease ID is %s%n", response));
+     * 
+ * * * @return A reactive response containing the lease ID. */ @@ -128,7 +138,12 @@ public Mono acquireLease() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseAsyncClient.acquireLeaseWithResponse} + * + *
+     * client.acquireLeaseWithResponse().subscribe(response ->
+     *     System.out.printf("Lease ID is %s%n", response.getValue()));
+     * 
+ * * * @return A reactive response containing the lease ID. */ @@ -146,7 +161,12 @@ public Mono> acquireLeaseWithResponse() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseAsyncClient.acquireLeaseWithResponse#ShareAcquireLeaseOptions} + * + *
+     * client.acquireLeaseWithResponse(new ShareAcquireLeaseOptions().setDuration(10)).subscribe(response ->
+     *     System.out.printf("Lease ID is %s%n", response.getValue()));
+     * 
+ * * * @param options {@link ShareAcquireLeaseOptions} * @return A reactive response containing the lease ID. @@ -186,7 +206,11 @@ Mono> acquireLeaseWithResponse(ShareAcquireLeaseOptions options * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseAsyncClient.releaseLease} + * + *
+     * client.releaseLease().subscribe(response -> System.out.println("Completed release lease"));
+     * 
+ * * * @return A reactive response signalling completion. */ @@ -204,7 +228,12 @@ public Mono releaseLease() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseAsyncClient.releaseLeaseWithResponse} + * + *
+     * client.releaseLeaseWithResponse().subscribe(response ->
+     *     System.out.printf("Release lease completed with status %d%n", response.getStatusCode()));
+     * 
+ * * * @return A reactive response signalling completion. */ @@ -235,7 +264,12 @@ Mono> releaseLeaseWithResponse(Context context) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseAsyncClient.breakLease} + * + *
+     * client.breakLease().subscribe(response ->
+     *     System.out.println("The lease has been successfully broken"));
+     * 
+ * * * @return A reactive response signalling completion. */ @@ -255,7 +289,12 @@ public Mono breakLease() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseAsyncClient.breakLeaseWithResponse} + * + *
+     * client.breakLeaseWithResponse().subscribe(response ->
+     *     System.out.println("The lease has been successfully broken"));
+     * 
+ * * * @return A reactive response signalling completion. */ @@ -275,7 +314,12 @@ public Mono> breakLeaseWithResponse() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseAsyncClient.breakLeaseWithResponse#ShareBreakLeaseOptions} + * + *
+     * client.breakLeaseWithResponse(new ShareBreakLeaseOptions().setBreakPeriod(Duration.ofSeconds(25)))
+     *     .subscribe(response -> System.out.println("The lease has been successfully broken"));
+     * 
+ * * * @param options {@link ShareBreakLeaseOptions} * @return A reactive response signalling completion. @@ -311,7 +355,11 @@ Mono> breakLeaseWithResponse(ShareBreakLeaseOptions options, Cont * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseAsyncClient.changeLease#String} + * + *
+     * client.changeLease("proposedId").subscribe(response -> System.out.printf("Changed lease ID is %s%n", response));
+     * 
+ * * * @param proposedId A new lease ID in a valid GUID format. * @return A reactive response containing the new lease ID. @@ -330,7 +378,12 @@ public Mono changeLease(String proposedId) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseAsyncClient.changeLeaseWithResponse#String} + * + *
+     * client.changeLeaseWithResponse("proposedId").subscribe(response ->
+     *     System.out.printf("Changed lease ID is %s%n", response.getValue()));
+     * 
+ * * * @param proposedId A new lease ID in a valid GUID format. * @return A reactive response containing the new lease ID. @@ -367,7 +420,11 @@ Mono> changeLeaseWithResponse(String proposedId, Context contex * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseAsyncClient.renewLease} + * + *
+     * client.renewLease().subscribe(response -> System.out.printf("Renewed lease ID is %s%n", response));
+     * 
+ * * * @return A reactive response containing the renewed lease ID. */ @@ -385,7 +442,12 @@ public Mono renewLease() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseAsyncClient.renewLeaseWithResponse} + * + *
+     * client.renewLeaseWithResponse().subscribe(response ->
+     *     System.out.printf("Renewed lease ID is %s%n", response.getValue()));
+     * 
+ * * * @return A reactive response containing the renewed lease ID. */ diff --git a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/specialized/ShareLeaseClient.java b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/specialized/ShareLeaseClient.java index a390ae924b4d2..f59ac993703a9 100644 --- a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/specialized/ShareLeaseClient.java +++ b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/specialized/ShareLeaseClient.java @@ -22,7 +22,13 @@ * *

Instantiating a ShareLeaseClient

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseClientBuilder.syncInstantiation} + * + *
+ * ShareLeaseClient fileLeaseClient = new ShareLeaseClientBuilder()
+ *     .fileClient(shareFileClient)
+ *     .buildClient();
+ * 
+ * * *

View {@link ShareLeaseClientBuilder this} for additional ways to construct the client.

* @@ -73,7 +79,11 @@ public String getLeaseId() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseClient.acquireLease} + * + *
+     * System.out.printf("Lease ID is %s%n", client.acquireLease());
+     * 
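Once acquired, the lease ID is typically supplied to subsequent file operations through {@link ShareRequestConditions}. A sketch of that usage, assuming {@code fileClient} is a {@link ShareFileClient} for the leased file:
<pre>
String leaseId = client.acquireLease();

// Operations against a leased file must present the active lease ID.
ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
fileClient.deleteWithResponse(requestConditions, Duration.ofSeconds(30), Context.NONE);
</pre>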
+ * * * @return The lease ID. */ @@ -87,7 +97,13 @@ public String acquireLease() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseClient.acquireLeaseWithResponse#Duration-Context} + * + *
+     * System.out.printf("Lease ID is %s%n", client
+     *     .acquireLeaseWithResponse(timeout, new Context(key, value))
+     *     .getValue());
+     * 
+ * * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. @@ -103,7 +119,13 @@ public Response acquireLeaseWithResponse(Duration timeout, Context conte * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseClient.acquireLeaseWithResponse#ShareAcquireLeaseOptions-Duration-Context} + * + *
+     * System.out.printf("Lease ID is %s%n", client
+     *     .acquireLeaseWithResponse(new ShareAcquireLeaseOptions().setDuration(10), timeout, new Context(key, value))
+     *     .getValue());
+     * 
+ * * * @param options {@link ShareAcquireLeaseOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -122,7 +144,12 @@ public Response acquireLeaseWithResponse(ShareAcquireLeaseOptions option * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseClient.releaseLease} + * + *
+     * client.releaseLease();
+     * System.out.println("Release lease completed");
+     * 
+ * */ @ServiceMethod(returns = ReturnType.SINGLE) public void releaseLease() { @@ -134,7 +161,13 @@ public void releaseLease() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseClient.releaseLeaseWithResponse#Duration-Context} + * + *
+     * System.out.printf("Release lease completed with status %d%n",
+     *     client.releaseLeaseWithResponse(timeout, new Context(key, value))
+     *         .getStatusCode());
+     * 
+ * * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. @@ -150,7 +183,12 @@ public Response releaseLeaseWithResponse(Duration timeout, Context context * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseClient.breakLease} + * + *
+     * client.breakLease();
+     * System.out.println("The lease has been successfully broken");
+     * 
+ * */ @ServiceMethod(returns = ReturnType.SINGLE) public void breakLease() { @@ -162,7 +200,12 @@ public void breakLease() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseClient.breakLeaseWithResponse#Duration-Context} + * + *
+     * client.breakLeaseWithResponse(timeout, new Context(key, value));
+     * System.out.println("The lease has been successfully broken");
+     * 
+ * * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. @@ -178,7 +221,13 @@ public Response breakLeaseWithResponse(Duration timeout, Context context) * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseClient.breakLeaseWithResponse#ShareBreakLeaseOptions-Duration-Context} + * + *
+     * client.breakLeaseWithResponse(new ShareBreakLeaseOptions().setBreakPeriod(Duration.ofSeconds(25)),
+     *     timeout, new Context(key, value));
+     * System.out.println("The lease has been successfully broken");
+     * 
+ * * * @param options {@link ShareBreakLeaseOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -195,7 +244,11 @@ public Response breakLeaseWithResponse(ShareBreakLeaseOptions options, Dur * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseClient.changeLease#String} + * + *
+     * System.out.printf("Changed lease ID is %s%n", client.changeLease("proposedId"));
+     * 
+ * * * @param proposedId A new lease ID in a valid GUID format. * @return The new lease ID. @@ -210,7 +263,13 @@ public String changeLease(String proposedId) { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseClient.changeLeaseWithResponse#String-Duration-Context} + * + *
+     * System.out.printf("Changed lease ID is %s%n",
+     *     client.changeLeaseWithResponse("proposedId", timeout, new Context(key, value))
+     *         .getValue());
+     * 
+ * * * @param proposedId A new lease ID in a valid GUID format. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. @@ -228,7 +287,11 @@ public Response changeLeaseWithResponse(String proposedId, Duration time * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseClient.renewLease} + * + *
+     * System.out.printf("Renewed lease ID is %s%n", client.renewLease());
+     * 
+ * * * @return A response containing the renewed lease ID. */ @@ -242,7 +305,13 @@ public String renewLease() { * *

Code Samples

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseClient.renewLeaseWithResponse#Duration-Context} + * + *
+     * System.out.printf("Renewed lease ID is %s%n",
+     *     client.renewLeaseWithResponse(timeout, new Context(key, value))
+     *         .getValue());
+     * 
+ * * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. diff --git a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/specialized/ShareLeaseClientBuilder.java b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/specialized/ShareLeaseClientBuilder.java index 3fde5a9dd8f01..0918734e0e3e5 100644 --- a/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/specialized/ShareLeaseClientBuilder.java +++ b/sdk/storage/azure-storage-file-share/src/main/java/com/azure/storage/file/share/specialized/ShareLeaseClientBuilder.java @@ -27,15 +27,43 @@ * *

Instantiating LeaseClients

* - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseClientBuilder.syncInstantiationWithFileAndLeaseId} + * + *
+ * ShareLeaseClient fileLeaseClient = new ShareLeaseClientBuilder()
+ *     .fileClient(shareFileClient)
+ *     .leaseId(leaseId)
+ *     .buildClient();
+ * 
+ * * - * {@codesnippet com.azure.storage.file.share.specialized.ShareLeaseClientBuilder.syncInstantiationWithShareAndLeaseId} + * + *
+ * ShareLeaseClient fileLeaseClient = new ShareLeaseClientBuilder()
+ *     .shareClient(shareClient)
+ *     .leaseId(leaseId)
+ *     .buildClient();
+ * 
+ * * *

Instantiating LeaseAsyncClients

* - * {@codesnippet com.azure.storage.file.specialized.ShareLeaseClientBuilder.asyncInstantiationWithFileAndLeaseId} + * + *
+ * ShareLeaseAsyncClient fileLeaseAsyncClient = new ShareLeaseClientBuilder()
+ *     .fileAsyncClient(shareFileAsyncClient)
+ *     .leaseId(leaseId)
+ *     .buildAsyncClient();
+ * 
+ * * - * {@codesnippet com.azure.storage.file.specialized.ShareLeaseClientBuilder.asyncInstantiationWithShareAndLeaseId} + * + *
+ * ShareLeaseAsyncClient fileLeaseAsyncClient = new ShareLeaseClientBuilder()
+ *     .shareAsyncClient(shareAsyncClient)
+ *     .leaseId(leaseId)
+ *     .buildAsyncClient();
+ * 
+ * * * @see ShareLeaseClient * @see ShareLeaseAsyncClient diff --git a/sdk/storage/azure-storage-file-share/src/samples/java/com/azure/storage/file/share/ReadmeSamples.java b/sdk/storage/azure-storage-file-share/src/samples/java/com/azure/storage/file/share/ReadmeSamples.java index 8067d33ba44c0..41d195eb82350 100644 --- a/sdk/storage/azure-storage-file-share/src/samples/java/com/azure/storage/file/share/ReadmeSamples.java +++ b/sdk/storage/azure-storage-file-share/src/samples/java/com/azure/storage/file/share/ReadmeSamples.java @@ -32,12 +32,13 @@ import org.slf4j.LoggerFactory; /** - * WARNING: MODIFYING THIS FILE WILL REQUIRE CORRESPONDING UPDATES TO README.md FILE. LINE NUMBERS + * WARNING: MODIFYING THIS FILE WILL REQUIRE CORRESPONDING UPDATES TO README.md FILE. LINE NUMBERS * ARE USED TO EXTRACT APPROPRIATE CODE SEGMENTS FROM THIS FILE. ADD NEW CODE AT THE BOTTOM TO AVOID CHANGING * LINE NUMBERS OF EXISTING CODE SAMPLES. - * + * * Code samples for the README.md */ +@SuppressWarnings("unused") public class ReadmeSamples { private static final String ACCOUNT_NAME = System.getenv("AZURE_STORAGE_ACCOUNT_NAME"); private static final String SAS_TOKEN = System.getenv("PRIMARY_SAS_TOKEN"); @@ -49,217 +50,289 @@ public class ReadmeSamples { ShareFileClient fileClient = new ShareFileClientBuilder().buildFileClient(); private Logger logger = LoggerFactory.getLogger(ReadmeSamples.class); - - public void createShareSeviceClient() { + + public void handleException() { + // BEGIN: readme-sample-handleException + try { + shareServiceClient.createShare("myShare"); + } catch (ShareStorageException e) { + logger.error("Failed to create a share with error code: " + e.getErrorCode()); + } + // END: readme-sample-handleException + } + + public void createShareServiceClient() { + // BEGIN: readme-sample-createShareServiceClient String shareServiceURL = String.format("https://%s.file.core.windows.net", ACCOUNT_NAME); ShareServiceClient shareServiceClient = new ShareServiceClientBuilder().endpoint(shareServiceURL) .sasToken(SAS_TOKEN).buildClient(); + // END: readme-sample-createShareServiceClient } public void createShareClient() { String shareName = "testshare"; + + // BEGIN: readme-sample-createShareClient String shareURL = String.format("https://%s.file.core.windows.net", ACCOUNT_NAME); ShareClient shareClient = new ShareClientBuilder().endpoint(shareURL) .sasToken(SAS_TOKEN).shareName(shareName).buildClient(); + // END: readme-sample-createShareClient } public void createShareClientWithConnectionString() { String shareName = "testshare"; + + // BEGIN: readme-sample-createShareClientWithConnectionString String shareURL = String.format("https://%s.file.core.windows.net", ACCOUNT_NAME); ShareClient shareClient = new ShareClientBuilder().endpoint(shareURL) .connectionString(CONNECTION_STRING).shareName(shareName).buildClient(); + // END: readme-sample-createShareClientWithConnectionString } - + public void createDirectoryClient() { String shareName = "testshare"; String directoryPath = "directoryPath"; + + // BEGIN: readme-sample-createDirectoryClient String directoryURL = String.format("https://%s.file.core.windows.net", ACCOUNT_NAME); ShareDirectoryClient directoryClient = new ShareFileClientBuilder().endpoint(directoryURL) .sasToken(SAS_TOKEN).shareName(shareName).resourcePath(directoryPath).buildDirectoryClient(); + // END: readme-sample-createDirectoryClient } public void createFileClient() { String shareName = "testshare"; String directoryPath = "directoryPath"; String fileName = "fileName"; + + // 
BEGIN: readme-sample-createFileClient String fileURL = String.format("https://%s.file.core.windows.net", ACCOUNT_NAME); ShareFileClient fileClient = new ShareFileClientBuilder().connectionString(CONNECTION_STRING) .endpoint(fileURL).shareName(shareName).resourcePath(directoryPath + "/" + fileName).buildFileClient(); + // END: readme-sample-createFileClient } public void createShare() { + // BEGIN: readme-sample-createShare String shareName = "testshare"; shareServiceClient.createShare(shareName); + // END: readme-sample-createShare } public void createSnapshotOnShare() { + // BEGIN: readme-sample-createSnapshotOnShare String shareName = "testshare"; ShareClient shareClient = shareServiceClient.getShareClient(shareName); shareClient.createSnapshot(); + // END: readme-sample-createSnapshotOnShare } public void createDirectory() { + // BEGIN: readme-sample-createDirectory String dirName = "testdir"; shareClient.createDirectory(dirName); + // END: readme-sample-createDirectory } public void createSubDirectory() { + // BEGIN: readme-sample-createSubDirectory String subDirName = "testsubdir"; directoryClient.createSubdirectory(subDirName); + // END: readme-sample-createSubDirectory } public void createFile() { + // BEGIN: readme-sample-createFile String fileName = "testfile"; long maxSize = 1024; directoryClient.createFile(fileName, maxSize); + // END: readme-sample-createFile } public void getShareList() { + // BEGIN: readme-sample-getShareList shareServiceClient.listShares(); + // END: readme-sample-getShareList } - + public void getSubDirectoryAndFileList() { + // BEGIN: readme-sample-getSubDirectoryAndFileList directoryClient.listFilesAndDirectories(); + // END: readme-sample-getSubDirectoryAndFileList } public void getRangeList() { + // BEGIN: readme-sample-getRangeList fileClient.listRanges(); + // END: readme-sample-getRangeList } public void deleteShare() { + // BEGIN: readme-sample-deleteShare shareClient.delete(); + // END: readme-sample-deleteShare } public void deleteDirectory() { + // BEGIN: readme-sample-deleteDirectory String dirName = "testdir"; shareClient.deleteDirectory(dirName); + // END: readme-sample-deleteDirectory } public void deleteSubDirectory() { + // BEGIN: readme-sample-deleteSubDirectory String subDirName = "testsubdir"; directoryClient.deleteSubdirectory(subDirName); + // END: readme-sample-deleteSubDirectory } public void deleteFile() { + // BEGIN: readme-sample-deleteFile String fileName = "testfile"; directoryClient.deleteFile(fileName); + // END: readme-sample-deleteFile } public void copyFile() { + // BEGIN: readme-sample-copyFile String sourceURL = "https://myaccount.file.core.windows.net/myshare/myfile"; Duration pollInterval = Duration.ofSeconds(2); SyncPoller poller = fileClient.beginCopy(sourceURL, null, pollInterval); + // END: readme-sample-copyFile } public void abortCopyFile() { + // BEGIN: readme-sample-abortCopyFile fileClient.abortCopy("copyId"); + // END: readme-sample-abortCopyFile } public void uploadDataToStorage() { + // BEGIN: readme-sample-uploadDataToStorage String uploadText = "default"; InputStream data = new ByteArrayInputStream(uploadText.getBytes(StandardCharsets.UTF_8)); fileClient.upload(data, uploadText.length()); + // END: readme-sample-uploadDataToStorage + } + + public void uploadDataToStorageBiggerThan4MB() { + // BEGIN: readme-sample-uploadDataToStorageBiggerThan4MB + byte[] data = "Hello, data sample!".getBytes(StandardCharsets.UTF_8); + + long chunkSize = ShareFileAsyncClient.FILE_DEFAULT_BLOCK_SIZE; + if (data.length > 
chunkSize) { + for (int offset = 0; offset < data.length; offset += chunkSize) { + try { + // the last chunk size is smaller than the others + chunkSize = Math.min(data.length - offset, chunkSize); + + // select the chunk in the byte array + byte[] subArray = Arrays.copyOfRange(data, offset, (int) (offset + chunkSize)); + + // upload the chunk + fileClient.uploadWithResponse(new ByteArrayInputStream(subArray), chunkSize, (long) offset, null, Context.NONE); + } catch (RuntimeException e) { + logger.error("Failed to upload the file", e); + if (Boolean.TRUE.equals(fileClient.exists())) { + fileClient.delete(); + } + throw e; + } + } + } else { + fileClient.upload(new ByteArrayInputStream(data), data.length); + } + // END: readme-sample-uploadDataToStorageBiggerThan4MB } public void uploadFileToStorage() { + // BEGIN: readme-sample-uploadFileToStorage String filePath = "${myLocalFilePath}"; fileClient.uploadFromFile(filePath); + // END: readme-sample-uploadFileToStorage } public void downloadDataFromFileRange() { + // BEGIN: readme-sample-downloadDataFromFileRange ShareFileRange fileRange = new ShareFileRange(0L, 2048L); OutputStream stream = new ByteArrayOutputStream(); fileClient.downloadWithResponse(stream, fileRange, false, null, Context.NONE); + // END: readme-sample-downloadDataFromFileRange } public void downloadFileFromFileRange() { + // BEGIN: readme-sample-downloadFileFromFileRange String filePath = "${myLocalFilePath}"; fileClient.downloadToFile(filePath); + // END: readme-sample-downloadFileFromFileRange } - + public void getShareServiceProperties() { + // BEGIN: readme-sample-getShareServiceProperties shareServiceClient.getProperties(); + // END: readme-sample-getShareServiceProperties } public void setShareServiceProperties() { + // BEGIN: readme-sample-setShareServiceProperties ShareServiceProperties properties = shareServiceClient.getProperties(); - properties.getMinuteMetrics().setEnabled(true).setIncludeApis(true); + properties.getMinuteMetrics().setEnabled(true).setIncludeApis(true); properties.getHourMetrics().setEnabled(true).setIncludeApis(true); shareServiceClient.setProperties(properties); + // END: readme-sample-setShareServiceProperties } public void setShareMetadata() { + // BEGIN: readme-sample-setShareMetadata Map metadata = Collections.singletonMap("directory", "metadata"); shareClient.setMetadata(metadata); + // END: readme-sample-setShareMetadata } public void getAccessPolicy() { + // BEGIN: readme-sample-getAccessPolicy shareClient.getAccessPolicy(); + // END: readme-sample-getAccessPolicy } public void setAccessPolicy() { + // BEGIN: readme-sample-setAccessPolicy ShareAccessPolicy accessPolicy = new ShareAccessPolicy().setPermissions("r") .setStartsOn(OffsetDateTime.now(ZoneOffset.UTC)) .setExpiresOn(OffsetDateTime.now(ZoneOffset.UTC).plusDays(10)); ShareSignedIdentifier permission = new ShareSignedIdentifier().setId("mypolicy").setAccessPolicy(accessPolicy); shareClient.setAccessPolicy(Collections.singletonList(permission)); + // END: readme-sample-setAccessPolicy } - public void getHaHandleList() { + public void getHandleList() { + // BEGIN: readme-sample-getHandleList PagedIterable handleItems = directoryClient.listHandles(null, true, Duration.ofSeconds(30), Context.NONE); + // END: readme-sample-getHandleList } public void forceCloseHandleWithResponse() { + // BEGIN: readme-sample-forceCloseHandleWithResponse PagedIterable handleItems = directoryClient.listHandles(null, true, Duration.ofSeconds(30), Context.NONE); String handleId = 
handleItems.iterator().next().getHandleId(); directoryClient.forceCloseHandleWithResponse(handleId, Duration.ofSeconds(30), Context.NONE); + // END: readme-sample-forceCloseHandleWithResponse } public void setQuotaOnShare() { + // BEGIN: readme-sample-setQuotaOnShare int quotaOnGB = 1; shareClient.setPropertiesWithResponse(new ShareSetPropertiesOptions().setQuotaInGb(quotaOnGB), null, Context.NONE); + // END: readme-sample-setQuotaOnShare } public void setFileHttpHeaders() { + // BEGIN: readme-sample-setFileHttpHeaders ShareFileHttpHeaders httpHeaders = new ShareFileHttpHeaders().setContentType("text/plain"); fileClient.setProperties(1024, httpHeaders, null, null); - } - - public void handleException() { - try { - shareServiceClient.createShare("myShare"); - } catch (ShareStorageException e) { - logger.error("Failed to create a share with error code: " + e.getErrorCode()); - } - } - - public void uploadDataToStorageBiggerThan4MB() { - byte[] data = "Hello, data sample!".getBytes(StandardCharsets.UTF_8); - - long chunkSize = ShareFileAsyncClient.FILE_DEFAULT_BLOCK_SIZE; - if (data.length > chunkSize) { - for (int offset = 0; offset < data.length; offset += chunkSize) { - try { - // the last chunk size is smaller than the others - chunkSize = Math.min(data.length - offset, chunkSize); - - // select the chunk in the byte array - byte[] subArray = Arrays.copyOfRange(data, offset, (int) (offset + chunkSize)); - - // upload the chunk - fileClient.uploadWithResponse(new ByteArrayInputStream(subArray), chunkSize, (long) offset, null, Context.NONE); - } catch (RuntimeException e) { - logger.error("Failed to upload the file", e); - if (Boolean.TRUE.equals(fileClient.exists())) { - fileClient.delete(); - } - throw e; - } - } - } else { - fileClient.upload(new ByteArrayInputStream(data), data.length); - } + // END: readme-sample-setFileHttpHeaders } } diff --git a/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueAsyncClient.java b/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueAsyncClient.java index d3a3a28ed72f5..b24e1def287b4 100644 --- a/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueAsyncClient.java +++ b/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueAsyncClient.java @@ -71,7 +71,14 @@ * *

Instantiating an Asynchronous Queue Client

* - * {@codesnippet com.azure.storage.queue.queueAsyncClient.instantiation} + * + *
+ * QueueAsyncClient client = new QueueClientBuilder()
+ *     .connectionString("connectionstring")
+ *     .endpoint("endpoint")
+ *     .buildAsyncClient();
+ * 
+ * * *

View {@link QueueClientBuilder this} for additional ways to construct the client.

* @@ -153,7 +160,16 @@ public HttpPipeline getHttpPipeline() { * *

Create a queue

* - * {@codesnippet com.azure.storage.queue.queueAsyncClient.create} + * + *
+     * client.create().subscribe(
+     *     response -> {
+     *     },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete creating the queue!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -177,7 +193,14 @@ public Mono create() { * *

Create a queue with metadata "queue:metadataMap"

* - * {@codesnippet com.azure.storage.queue.queueAsyncClient.createWithResponse#map} + * + *
+     * client.createWithResponse(Collections.singletonMap("queue", "metadataMap")).subscribe(
+     *     response -> System.out.println("Complete creating the queue with status code:" + response.getStatusCode()),
+     *     error -> System.err.print(error.toString())
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -211,7 +234,13 @@ Mono> createWithResponse(Map metadata, Context co * *

Delete a queue

* - * {@codesnippet com.azure.storage.queue.queueAsyncClient.delete} + * + *
+     * client.delete().doOnSuccess(
+     *     response -> System.out.println("Deleting the queue completed.")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -235,7 +264,13 @@ public Mono delete() { * *

Delete a queue

* - * {@codesnippet com.azure.storage.queue.queueAsyncClient.deleteWithResponse} + * + *
+     * client.deleteWithResponse().subscribe(
+     *     response -> System.out.println("Deleting the queue completed with status code: " + response.getStatusCode())
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -266,7 +301,15 @@ Mono> deleteWithResponse(Context context) { * *

Get the properties of the queue

* - * {@codesnippet com.azure.storage.queue.queueAsyncClient.getProperties} + * + *
+     * client.getProperties()
+     *     .subscribe(properties -> {
+     *         System.out.printf("Metadata: %s, Approximate message count: %d", properties.getMetadata(),
+     *             properties.getApproximateMessagesCount());
+     *     });
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -291,7 +334,16 @@ public Mono getProperties() { * *

Get the properties of the queue

* - * {@codesnippet com.azure.storage.queue.queueAsyncClient.getPropertiesWithResponse} + * + *
+     * client.getPropertiesWithResponse()
+     *     .subscribe(response -> {
+     *         QueueProperties properties = response.getValue();
+     *         System.out.printf("Metadata: %s, Approximate message count: %d", properties.getMetadata(),
+     *             properties.getApproximateMessagesCount());
+     *     });
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -325,11 +377,21 @@ Mono> getPropertiesWithResponse(Context context) { * *

Set the queue's metadata to "queue:metadataMap"

* - * {@codesnippet com.azure.storage.queue.queueAsyncClient.setMetadata#map} + * + *
+     * client.setMetadata(Collections.singletonMap("queue", "metadataMap"))
+     *     .subscribe(response -> System.out.println("Setting metadata completed."));
+     * 
+ * * *

Clear the queue's metadata

* - * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMetadata#map} + * + *
+     * client.setMetadata(null)
+     *     .subscribe(response -> System.out.println("Clearing metadata completed."));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -356,11 +418,23 @@ public Mono setMetadata(Map metadata) { * *

Set the queue's metadata to "queue:metadataMap"

* - * {@codesnippet com.azure.storage.queue.queueAsyncClient.setMetadataWithResponse#map} + * + *
+     * client.setMetadataWithResponse(Collections.singletonMap("queue", "metadataMap"))
+     *     .subscribe(response -> System.out.printf("Setting metadata completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

Clear the queue's metadata

* - * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMetadataWithResponse#map} + * + *
+     * client.setMetadataWithResponse(null)
+     *     .subscribe(response -> System.out.printf("Clearing metadata completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -393,7 +467,13 @@ Mono> setMetadataWithResponse(Map metadata, Conte * *

List the stored access policies

* - * {@codesnippet com.azure.storage.queue.queueAsyncClient.getAccessPolicy} + * + *
+     * client.getAccessPolicy()
+     *     .subscribe(result -> System.out.printf("Access policy %s allows these permissions: %s",
+     *         result.getId(), result.getAccessPolicy().getPermissions()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -427,7 +507,17 @@ public PagedFlux getAccessPolicy() { * *

Set a read only stored access policy

* - * {@codesnippet com.azure.storage.queue.QueueAsyncClient.setAccessPolicy#Iterable} + * + *
+     * QueueAccessPolicy accessPolicy = new QueueAccessPolicy().setPermissions("r")
+     *     .setStartsOn(OffsetDateTime.now(ZoneOffset.UTC))
+     *     .setExpiresOn(OffsetDateTime.now(ZoneOffset.UTC).plusDays(10));
+     *
+     * QueueSignedIdentifier permission = new QueueSignedIdentifier().setId("mypolicy").setAccessPolicy(accessPolicy);
+     * client.setAccessPolicy(Collections.singletonList(permission))
+     *     .subscribe(response -> System.out.println("Setting access policies completed."));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -453,7 +543,18 @@ public Mono setAccessPolicy(Iterable permissions) { * *

Set a read only stored access policy

* - * {@codesnippet com.azure.storage.queue.QueueAsyncClient.setAccessPolicyWithResponse#Iterable} + * + *
+     * QueueAccessPolicy accessPolicy = new QueueAccessPolicy().setPermissions("r")
+     *     .setStartsOn(OffsetDateTime.now(ZoneOffset.UTC))
+     *     .setExpiresOn(OffsetDateTime.now(ZoneOffset.UTC).plusDays(10));
+     *
+     * QueueSignedIdentifier permission = new QueueSignedIdentifier().setId("mypolicy").setAccessPolicy(accessPolicy);
+     * client.setAccessPolicyWithResponse(Collections.singletonList(permission))
+     *     .subscribe(response -> System.out.printf("Setting access policies completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -509,7 +610,12 @@ OffsetDateTime.now will only give back milliseconds (more precise fields are zer * *

Clear the messages

* - * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMessages} + * + *
+     * client.clearMessages().subscribe(
+     *     response -> System.out.println("Clearing messages completed."));
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -533,7 +639,13 @@ public Mono clearMessages() { * *

Clear the messages

* - * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMessagesWithResponse} + * + *
+     * client.clearMessagesWithResponse().doOnSuccess(
+     *     response -> System.out.println("Clearing messages completed with status code: " + response.getStatusCode())
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -564,7 +676,16 @@ Mono> clearMessagesWithResponse(Context context) { * *

Enqueue a message of "Hello, Azure"

* - * {@codesnippet com.azure.storage.queue.queueAsyncClient.sendMessage#string} + * + *
+     * client.sendMessage("Hello, Azure").subscribe(
+     *     response -> {
+     *     },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete enqueuing the message!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -591,7 +712,16 @@ public Mono sendMessage(String messageText) { * *

Enqueue a message of "Hello, Azure"

* - * {@codesnippet com.azure.storage.queue.queueAsyncClient.sendMessage#BinaryData} + * + *
+     * client.sendMessage(BinaryData.fromString("Hello, Azure")).subscribe(
+     *         response -> {
+     *         },
+     *         error -> System.err.print(error.toString()),
+     *         () -> System.out.println("Complete enqueuing the message!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -614,11 +744,31 @@ public Mono sendMessage(BinaryData message) { * *

Add a message of "Hello, Azure" that has a timeout of 5 seconds

* - * {@codesnippet com.azure.storage.queue.queueAsyncClient.sendMessageWithResponse#string-duration-duration} + * + *
+     * client.sendMessageWithResponse("Hello, Azure",
+     *     Duration.ofSeconds(5), null).subscribe(
+     *         response -> System.out.printf("Message %s expires at %s", response.getValue().getMessageId(),
+     *             response.getValue().getExpirationTime()),
+     *         error -> System.err.print(error.toString()),
+     *         () -> System.out.println("Complete enqueuing the message!")
+     * );
+     * 
+ * * *

Add a message of "Goodbye, Azure" that has a time to live of 5 seconds

* - * {@codesnippet com.azure.storage.queue.QueueAsyncClient.sendMessageWithResponse-liveTime#String-Duration-Duration} + * + *
+     * client.sendMessageWithResponse("Goodbye, Azure",
+     *     null, Duration.ofSeconds(5)).subscribe(
+     *         response -> System.out.printf("Message %s expires at %s", response.getValue().getMessageId(),
+     *             response.getValue().getExpirationTime()),
+     *         error -> System.err.print(error.toString()),
+     *         () -> System.out.println("Complete enqueuing the message!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -654,11 +804,31 @@ public Mono> sendMessageWithResponse(String messageT * *

Add a message of "Hello, Azure" that has a timeout of 5 seconds

* - * {@codesnippet com.azure.storage.queue.queueAsyncClient.sendMessageWithResponse#BinaryData-duration-duration} + * + *
+     * client.sendMessageWithResponse(BinaryData.fromString("Hello, Azure"),
+     *         Duration.ofSeconds(5), null).subscribe(
+     *         response -> System.out.printf("Message %s expires at %s", response.getValue().getMessageId(),
+     *             response.getValue().getExpirationTime()),
+     *         error -> System.err.print(error.toString()),
+     *         () -> System.out.println("Complete enqueuing the message!")
+     * );
+     * 
+ * * *

Add a message of "Goodbye, Azure" that has a time to live of 5 seconds

* - * {@codesnippet com.azure.storage.queue.QueueAsyncClient.sendMessageWithResponse-liveTime#BinaryData-Duration-Duration} + * + *
+     * client.sendMessageWithResponse(BinaryData.fromString("Goodbye, Azure"),
+     *         null, Duration.ofSeconds(5)).subscribe(
+     *         response -> System.out.printf("Message %s expires at %s", response.getValue().getMessageId(),
+     *             response.getValue().getExpirationTime()),
+     *         error -> System.err.print(error.toString()),
+     *         () -> System.out.println("Complete enqueuing the message!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -723,7 +893,16 @@ private Mono encodeMessage(BinaryData message) { * *

Dequeue a message

* - * {@codesnippet com.azure.storage.queue.queueAsyncClient.receiveMessage} + * + *
+     * client.receiveMessage().subscribe(
+     *     message -> System.out.println("The message got from getMessages operation: "
+     *         + message.getBody().toString()),
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete receiving the message!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -750,7 +929,16 @@ public Mono receiveMessage() { * *

Dequeue up to 5 messages

* - * {@codesnippet com.azure.storage.queue.queueAsyncClient.receiveMessages#integer} + * + *
+     * client.receiveMessages(5).subscribe(
+     *     message -> System.out.println("The message got from getMessages operation: "
+     *         + message.getBody().toString()),
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete receiving the message!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -781,7 +969,17 @@ public PagedFlux receiveMessages(Integer maxMessages) { * *

Dequeue up to 5 messages and give them a 60 second timeout period

* - * {@codesnippet com.azure.storage.queue.queueAsyncClient.receiveMessages#integer-duration} + * + *
+     * client.receiveMessages(5, Duration.ofSeconds(60))
+     *     .subscribe(
+     *         message -> System.out.println("The message got from getMessages operation: "
+     *             + message.getBody().toString()),
+     *         error -> System.err.print(error.toString()),
+     *         () -> System.out.println("Complete receiving the message!")
+     *     );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -909,7 +1107,16 @@ private Mono decodeMessageBody(String messageText, QueueMessageEncod * *

Peek the first message

* - * {@codesnippet com.azure.storage.queue.queueAsyncClient.peekMessage} + * + *
+     * client.peekMessage().subscribe(
+     *     peekMessages -> System.out.println("The message got from peek operation: "
+     *         + peekMessages.getBody().toString()),
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete peeking the message!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -935,7 +1142,16 @@ public Mono peekMessage() { * *

Peek up to the first five messages

* - * {@codesnippet com.azure.storage.queue.queueAsyncClient.peekMessages#integer} + * + *
+     * client.peekMessages(5).subscribe(
+     *     peekMessage -> System.out.printf("Peeked message %s has been received %d times",
+     *         peekMessage.getMessageId(), peekMessage.getDequeueCount()),
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete peeking the message!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1031,7 +1247,23 @@ private Mono transformPeekedMessageItemInternal( * *

Dequeue the first message and update it to "Hello again, Azure" and hide it for 5 seconds

* - * {@codesnippet com.azure.storage.queue.QueueAsyncClient.updateMessage#String-String-String-Duration} + * + *
+     * client.receiveMessage().subscribe(
+     *     message -> {
+     *         client.updateMessage("newText", message.getMessageId(),
+     *             message.getPopReceipt(), null).subscribe(
+     *                 response -> {
+     *                 },
+     *                 updateError -> System.err.print(updateError.toString()),
+     *                 () -> System.out.println("Complete updating the message!")
+     *         );
+     *     },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete receiving the message!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1065,7 +1297,24 @@ public Mono updateMessage(String messageId, String popRecei * *

Dequeue the first message and update it to "Hello again, Azure" and hide it for 5 seconds

* - * {@codesnippet com.azure.storage.queue.QueueAsyncClient.updateMessageWithResponse#String-String-String-Duration} + * + *
+     *
+     * client.receiveMessage().subscribe(
+     *     message -> {
+     *         client.updateMessageWithResponse(message.getMessageId(), message.getPopReceipt(), "newText",
+     *             null).subscribe(
+     *                 response -> System.out.println("Complete updating the message with status code:"
+     *                     + response.getStatusCode()),
+     *                 updateError -> System.err.print(updateError.toString()),
+     *                 () -> System.out.println("Complete updating the message!")
+     *         );
+     *     },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete receiving the message!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1110,7 +1359,22 @@ Mono> updateMessageWithResponse(String messageId, * *

Delete the first message

* - * {@codesnippet com.azure.storage.queue.QueueAsyncClient.deleteMessage#String-String} + * + *
+     * client.receiveMessage().subscribe(
+     *     message -> {
+     *         client.deleteMessage(message.getMessageId(), message.getPopReceipt()).subscribe(
+     *             response -> {
+     *             },
+     *             deleteError -> System.err.print(deleteError.toString()),
+     *             () -> System.out.println("Complete deleting the message!")
+     *         );
+     *     },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete receiving the message!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1137,7 +1401,23 @@ public Mono deleteMessage(String messageId, String popReceipt) { * *

Delete the first message

* - * {@codesnippet com.azure.storage.queue.QueueAsyncClient.deleteMessageWithResponse#String-String} + * + *
+     * client.receiveMessage().subscribe(
+     *     message -> {
+     *         client.deleteMessageWithResponse(message.getMessageId(), message.getPopReceipt())
+     *             .subscribe(
+     *                 response -> System.out.println("Complete deleting the message with status code: "
+     *                     + response.getStatusCode()),
+     *                 deleteError -> System.err.print(deleteError.toString()),
+     *                 () -> System.out.println("Complete deleting the message!")
+     *             );
+     *     },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete receiving the message!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -1170,7 +1450,12 @@ Mono> deleteMessageWithResponse(String messageId, String popRecei * *

Code Samples

* - * {@codesnippet com.azure.storage.queue.queueAsyncClient.getQueueName} + * + *
+     * String queueName = client.getQueueName();
+     * System.out.println("The name of the queue is " + queueName);
+     * 
+ * * * @return The name of the queue. */ @@ -1195,7 +1480,17 @@ public String getAccountName() { * *

Code Samples

* - * {@codesnippet com.azure.storage.queue.QueueAsyncClient.generateSas#QueueServiceSasSignatureValues} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * QueueSasPermission permission = new QueueSasPermission().setReadPermission(true);
+     *
+     * QueueServiceSasSignatureValues values = new QueueServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateSas(values); // Client must be authenticated via StorageSharedKeyCredential
+     * 
+ * * * @param queueServiceSasSignatureValues {@link QueueServiceSasSignatureValues} * @@ -1212,7 +1507,18 @@ public String generateSas(QueueServiceSasSignatureValues queueServiceSasSignatur * *

Code Samples

* - * {@codesnippet com.azure.storage.queue.QueueAsyncClient.generateSas#QueueServiceSasSignatureValues-Context} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * QueueSasPermission permission = new QueueSasPermission().setReadPermission(true);
+     *
+     * QueueServiceSasSignatureValues values = new QueueServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * client.generateSas(values, new Context("key", "value"));
+     * 
+ * * * @param queueServiceSasSignatureValues {@link QueueServiceSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. diff --git a/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueClient.java b/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueClient.java index 3875247b4ac68..69618e422b663 100644 --- a/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueClient.java +++ b/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueClient.java @@ -33,7 +33,14 @@ * *

Instantiating a Synchronous Queue Client

* - * {@codesnippet com.azure.storage.queue.queueClient.instantiation} + * + *
+ * QueueClient client = new QueueClientBuilder()
+ *     .connectionString("connectionstring")
+ *     .endpoint("endpoint")
+ *     .buildClient();
+ * 
+ * * *

View {@link QueueClientBuilder this} for additional ways to construct the client.

* @@ -95,7 +102,12 @@ public HttpPipeline getHttpPipeline() { * *

Create a queue

* - * {@codesnippet com.azure.storage.queue.queueClient.create} + * + *
+     * client.create();
+     * System.out.println("Complete creating queue.");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -114,7 +126,13 @@ public void create() { * *

Create a queue with metadata "queue:metadataMap"

* - * {@codesnippet com.azure.storage.queue.queueClient.createWithResponse#map-duration-context} + * + *
+     * Response<Void> response = client.createWithResponse(Collections.singletonMap("queue", "metadataMap"),
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.println("Complete creating queue with status code: " + response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -142,7 +160,12 @@ public Response createWithResponse(Map metadata, Duration * *

Delete a queue

* - * {@codesnippet com.azure.storage.queue.queueClient.delete} + * + *
+     * client.delete();
+     * System.out.println("Complete deleting the queue.");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -161,7 +184,12 @@ public void delete() { * *

Delete a queue

* - * {@codesnippet com.azure.storage.queue.queueClient.deleteWithResponse#duration-context} + * + *
+     * Response<Void> response = client.deleteWithResponse(Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.println("Complete deleting the queue with status code: " + response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -186,7 +214,13 @@ public Response deleteWithResponse(Duration timeout, Context context) { * *

Get the properties of the queue

* - * {@codesnippet com.azure.storage.queue.queueClient.getProperties} + * + *
+     * QueueProperties properties = client.getProperties();
+     * System.out.printf("Metadata: %s, Approximate message count: %d", properties.getMetadata(),
+     *     properties.getApproximateMessagesCount());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -207,7 +241,14 @@ public QueueProperties getProperties() { * *

Get the properties of the queue

* - * {@codesnippet com.azure.storage.queue.queueClient.getPropertiesWithResponse#duration-context} + * + *
+     * QueueProperties properties = client.getPropertiesWithResponse(Duration.ofSeconds(1),
+     *     new Context(key1, value1)).getValue();
+     * System.out.printf("Metadata: %s, Approximate message count: %d", properties.getMetadata(),
+     *     properties.getApproximateMessagesCount());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -235,11 +276,21 @@ public Response getPropertiesWithResponse(Duration timeout, Con * *

Set the queue's metadata to "queue:metadataMap"

* - * {@codesnippet com.azure.storage.queue.queueClient.setMetadata#map} + * + *
+     * client.setMetadata(Collections.singletonMap("queue", "metadataMap"));
+     * System.out.println("Setting metadata completed.");
+     * 
+ * * *

Clear the queue's metadata

* - * {@codesnippet com.azure.storage.queue.queueClient.clearMetadata#map} + * + *
+     * client.setMetadata(null);
+     * System.out.println("Clearing metadata completed.");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -261,11 +312,23 @@ public void setMetadata(Map metadata) { * *

Set the queue's metadata to "queue:metadataMap"

* - * {@codesnippet com.azure.storage.queue.queueClient.setMetadataWithResponse#map-duration-context} + * + *
+     * client.setMetadataWithResponse(Collections.singletonMap("queue", "metadataMap"),
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.println("Setting metadata completed.");
+     * 
+ * * *

Clear the queue's metadata

* - * {@codesnippet com.azure.storage.queue.queueClient.clearMetadataWithResponse#map-duration-context} + * + *
+     * Response<Void> response = client.setMetadataWithResponse(null, Duration.ofSeconds(1),
+     *     new Context(key1, value1));
+     * System.out.printf("Clearing metadata completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -291,7 +354,14 @@ public Response setMetadataWithResponse(Map metadata, Dura * *

List the stored access policies

* - * {@codesnippet com.azure.storage.queue.queueClient.getAccessPolicy} + * + *
+     * for (QueueSignedIdentifier permission : client.getAccessPolicy()) {
+     *     System.out.printf("Access policy %s allows these permissions: %s", permission.getId(),
+     *         permission.getAccessPolicy().getPermissions());
+     * }
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -311,7 +381,16 @@ public PagedIterable getAccessPolicy() { * *

Set a read only stored access policy

* - * {@codesnippet com.azure.storage.queue.QueueClient.setAccessPolicy#List} + * + *
+     * QueueAccessPolicy accessPolicy = new QueueAccessPolicy().setPermissions("r")
+     *     .setStartsOn(OffsetDateTime.now(ZoneOffset.UTC))
+     *     .setExpiresOn(OffsetDateTime.now(ZoneOffset.UTC).plusDays(10));
+     * QueueSignedIdentifier permission = new QueueSignedIdentifier().setId("mypolicy").setAccessPolicy(accessPolicy);
+     * client.setAccessPolicy(Collections.singletonList(permission));
+     * System.out.println("Setting access policies completed.");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -332,7 +411,17 @@ public void setAccessPolicy(List permissions) { * *

Set a read only stored access policy

* - * {@codesnippet com.azure.storage.queue.queueClient.setAccessPolicyWithResponse#List-Duration-Context} + * + *
+     * QueueAccessPolicy accessPolicy = new QueueAccessPolicy().setPermissions("r")
+     *     .setStartsOn(OffsetDateTime.now(ZoneOffset.UTC))
+     *     .setExpiresOn(OffsetDateTime.now(ZoneOffset.UTC).plusDays(10));
+     * QueueSignedIdentifier permission = new QueueSignedIdentifier().setId("mypolicy").setAccessPolicy(accessPolicy);
+     * Response<Void> response = client.setAccessPolicyWithResponse(Collections.singletonList(permission),
+     *     Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Setting access policies completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -360,7 +449,12 @@ public Response setAccessPolicyWithResponse(List pe * *

Clear the messages

* - * {@codesnippet com.azure.storage.queue.queueClient.clearMessages} + * + *
+     * client.clearMessages();
+     * System.out.println("Clearing messages completed.");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -379,7 +473,12 @@ public void clearMessages() { * *

Clear the messages

* - * {@codesnippet com.azure.storage.queue.queueClient.clearMessagesWithResponse#duration-context} + * + *
+     * Response<Void> response = client.clearMessagesWithResponse(Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.printf("Clearing messages completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -404,7 +503,12 @@ public Response clearMessagesWithResponse(Duration timeout, Context contex * *

Send a message of "hello msg"

* - * {@codesnippet com.azure.storage.queue.queueClient.sendMessage#string} + * + *
+     * SendMessageResult response = client.sendMessage("hello msg");
+     * System.out.println("Complete enqueuing the message with message Id" + response.getMessageId());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -427,7 +531,12 @@ public SendMessageResult sendMessage(String messageText) { * *

Send a message of "Hello msg"

* - * {@codesnippet com.azure.storage.queue.queueClient.sendMessage#BinaryData} + * + *
+     * SendMessageResult response = client.sendMessage(BinaryData.fromString("Hello msg"));
+     * System.out.println("Complete enqueuing the message with message Id" + response.getMessageId());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -450,11 +559,25 @@ public SendMessageResult sendMessage(BinaryData message) { * *

Add a message of "Hello, Azure" that has a timeout of 5 seconds

* - * {@codesnippet com.azure.storage.queue.QueueClient.sendMessageWithResponse#String-Duration-Duration-Duration-Context1} + * + *
+     * SendMessageResult sentMessageItem = client.sendMessageWithResponse("Hello, Azure",
+     *     Duration.ofSeconds(5), null, Duration.ofSeconds(1), new Context(key1, value1)).getValue();
+     * System.out.printf("Message %s expires at %s", sentMessageItem.getMessageId(),
+     *     sentMessageItem.getExpirationTime());
+     * 
+ * * *

Add a message of "Goodbye, Azure" that has a time to live of 5 seconds

* - * {@codesnippet com.azure.storage.queue.QueueClient.sendMessageWithResponse#String-Duration-Duration-Duration-Context2} + * + *
+     * SendMessageResult enqueuedMessage = client.sendMessageWithResponse("Goodbye, Azure",
+     *     null, Duration.ofSeconds(5), Duration.ofSeconds(1), new Context(key1, value1)).getValue();
+     * System.out.printf("Message %s expires at %s", enqueuedMessage.getMessageId(),
+     *     enqueuedMessage.getExpirationTime());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -492,11 +615,25 @@ public Response sendMessageWithResponse(String messageText, D * *

Add a message of "Hello, Azure" that has a timeout of 5 seconds

* - * {@codesnippet com.azure.storage.queue.QueueClient.sendMessageWithResponse#BinaryData-Duration-Duration-Duration-Context1} + * + *
+     * SendMessageResult sentMessageItem = client.sendMessageWithResponse(BinaryData.fromString("Hello, Azure"),
+     *     Duration.ofSeconds(5), null, Duration.ofSeconds(1), new Context(key1, value1)).getValue();
+     * System.out.printf("Message %s expires at %s", sentMessageItem.getMessageId(),
+     *     sentMessageItem.getExpirationTime());
+     * 
+ * * *

Add a message of "Goodbye, Azure" that has a time to live of 5 seconds

* - * {@codesnippet com.azure.storage.queue.QueueClient.sendMessageWithResponse#BinaryData-Duration-Duration-Duration-Context2} + * + *
+     * SendMessageResult enqueuedMessage = client.sendMessageWithResponse(BinaryData.fromString("Goodbye, Azure"),
+     *     null, Duration.ofSeconds(5), Duration.ofSeconds(1), new Context(key1, value1)).getValue();
+     * System.out.printf("Message %s expires at %s", enqueuedMessage.getMessageId(),
+     *     enqueuedMessage.getExpirationTime());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -534,7 +671,12 @@ public Response sendMessageWithResponse(BinaryData message, D * *

Receive a message

* - * {@codesnippet com.azure.storage.queue.queueClient.receiveMessage} + * + *
+     * QueueMessageItem queueMessageItem = client.receiveMessage();
+     * System.out.println("Complete receiving the message: " + queueMessageItem.getMessageId());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -558,7 +700,14 @@ public QueueMessageItem receiveMessage() { * *

Receive up to 5 messages

* - * {@codesnippet com.azure.storage.queue.queueClient.receiveMessages#integer} + * + *
+     * for (QueueMessageItem message : client.receiveMessages(5)) {
+     *     System.out.printf("Received %s and it becomes visible at %s",
+     *         message.getMessageId(), message.getTimeNextVisible());
+     * }
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -585,7 +734,15 @@ public PagedIterable receiveMessages(Integer maxMessages) { * *

Receive up to 5 messages and give them a 60 second timeout period

* - * {@codesnippet com.azure.storage.queue.queueClient.receiveMessages#integer-duration-duration-context} + * + *
+     * for (QueueMessageItem message : client.receiveMessages(5, Duration.ofSeconds(60),
+     *     Duration.ofSeconds(1), new Context(key1, value1))) {
+     *     System.out.printf("Received %s and it becomes visible at %s",
+     *         message.getMessageId(), message.getTimeNextVisible());
+     * }
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -623,7 +780,12 @@ public PagedIterable receiveMessages(Integer maxMessages, Dura * *

Peek the first message

* - * {@codesnippet com.azure.storage.queue.queueClient.peekMessage} + * + *
+     * PeekedMessageItem peekedMessageItem = client.peekMessage();
+     * System.out.println("Complete peeking the message: " + peekedMessageItem.getBody().toString());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -645,7 +807,14 @@ public PeekedMessageItem peekMessage() { * *

Peek up to the first five messages

* - * {@codesnippet com.azure.storage.queue.queueClient.peekMessages#integer-duration-context} + * + *
+     * client.peekMessages(5, Duration.ofSeconds(1), new Context(key1, value1)).forEach(
+     *     peekMessage -> System.out.printf("Peeked message %s has been received %d times",
+     *         peekMessage.getMessageId(), peekMessage.getDequeueCount())
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -673,7 +842,14 @@ public PagedIterable peekMessages(Integer maxMessages, Durati * *

Dequeue the first message and update it to "Hello again, Azure" and hide it for 5 seconds

* - * {@codesnippet com.azure.storage.queue.QueueClient.updateMessage#String-String-String-Duration} + * + *
+     * QueueMessageItem queueMessageItem = client.receiveMessage();
+     * UpdateMessageResult result = client.updateMessage(queueMessageItem.getMessageId(),
+     *     queueMessageItem.getPopReceipt(), "newText", null);
+     * System.out.println("Complete updating the message with the receipt " + result.getPopReceipt());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -703,7 +879,15 @@ public UpdateMessageResult updateMessage(String messageId, String popReceipt, St * *

Dequeue the first message and update it to "Hello again, Azure" and hide it for 5 seconds

* - * {@codesnippet com.azure.storage.queue.QueueClient.updateMessageWithResponse#String-String-String-Duration-Duration-Context} + * + *
+     * QueueMessageItem queueMessageItem = client.receiveMessage();
+     * Response<UpdateMessageResult> response = client.updateMessageWithResponse(queueMessageItem.getMessageId(),
+     *     queueMessageItem.getPopReceipt(), "newText", null, Duration.ofSeconds(1),
+     *     new Context(key1, value1));
+     * System.out.println("Complete updating the message with status code " + response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -738,7 +922,13 @@ public Response updateMessageWithResponse(String messageId, * *

Delete the first message

* - * {@codesnippet com.azure.storage.queue.QueueClient.deleteMessage#String-String} + * + *
+     * QueueMessageItem queueMessageItem = client.receiveMessage();
+     * client.deleteMessage(queueMessageItem.getMessageId(), queueMessageItem.getPopReceipt());
+     * System.out.println("Complete deleting the message.");
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -760,7 +950,14 @@ public void deleteMessage(String messageId, String popReceipt) { * *

Delete the first message

* - * {@codesnippet com.azure.storage.queue.QueueClient.deleteMessageWithResponse#String-String-Duration-Context} + * + *
+     * QueueMessageItem queueMessageItem = client.receiveMessage();
+     * Response<Void> response = client.deleteMessageWithResponse(queueMessageItem.getMessageId(),
+     *     queueMessageItem.getPopReceipt(), Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.println("Complete deleting the message with status code " + response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -787,7 +984,12 @@ public Response deleteMessageWithResponse(String messageId, String popRece * *
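For orientation, a minimal sketch (not part of this patch) that combines the receive and delete snippets above into the usual receive-process-delete loop; it assumes a QueueClient built as in the earlier instantiation sample:

    // Sketch only: receive a batch, process each body, then delete the message so it is not
    // redelivered once its visibility timeout expires.
    for (QueueMessageItem message : client.receiveMessages(10)) {
        System.out.println("Processing message: " + message.getBody().toString());
        client.deleteMessage(message.getMessageId(), message.getPopReceipt());
    }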

Code Samples

* - * {@codesnippet com.azure.storage.queue.queueClient.getQueueName} + * + *
+     * String queueName = client.getQueueName();
+     * System.out.println("The name of the queue is " + queueName);
+     * 
+ * * * @return The name of the queue. */ @@ -812,7 +1014,17 @@ public String getAccountName() { * *

Code Samples

* - * {@codesnippet com.azure.storage.queue.QueueClient.generateSas#QueueServiceSasSignatureValues} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * QueueSasPermission permission = new QueueSasPermission().setReadPermission(true);
+     *
+     * QueueServiceSasSignatureValues values = new QueueServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * client.generateSas(values); // Client must be authenticated via StorageSharedKeyCredential
+     * 
+ * * * @param queueServiceSasSignatureValues {@link QueueServiceSasSignatureValues} * @@ -829,7 +1041,18 @@ public String generateSas(QueueServiceSasSignatureValues queueServiceSasSignatur * *
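A minimal sketch (not part of this patch) of how the SAS string returned by generateSas might be used afterwards, assuming the client above is authenticated with a StorageSharedKeyCredential:

    // Sketch only: scope a new client to the same queue using the generated SAS.
    String sas = client.generateSas(values);
    QueueClient sasScopedClient = new QueueClientBuilder()
        .endpoint(client.getQueueUrl())
        .sasToken(sas)
        .buildClient();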

Code Samples

* - * {@codesnippet com.azure.storage.queue.QueueClient.generateSas#QueueServiceSasSignatureValues-Context} + * + *
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plusDays(1);
+     * QueueSasPermission permission = new QueueSasPermission().setReadPermission(true);
+     *
+     * QueueServiceSasSignatureValues values = new QueueServiceSasSignatureValues(expiryTime, permission)
+     *     .setStartTime(OffsetDateTime.now());
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * client.generateSas(values, new Context("key", "value"));
+     * 
+ * * * @param queueServiceSasSignatureValues {@link QueueServiceSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. diff --git a/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueClientBuilder.java b/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueClientBuilder.java index 8a456edc80323..3c818776de8d8 100644 --- a/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueClientBuilder.java +++ b/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueClientBuilder.java @@ -45,19 +45,47 @@ * client.

* *

Instantiating a synchronous Queue Client with SAS token

- * {@codesnippet com.azure.storage.queue.queueClient.instantiation.sastoken} + * + *
+ * QueueClient client = new QueueClientBuilder()
+ *     .endpoint("https://${accountName}.queue.core.windows.net?${SASToken}")
+ *     .buildClient();
+ * 
+ * * *

Instantiating an Asynchronous Queue Client with SAS token

- * {@codesnippet com.azure.storage.queue.queueAsyncClient.instantiation.sastoken} + * + *
+ * QueueAsyncClient queueAsyncClient = new QueueClientBuilder()
+ *     .endpoint("https://{accountName}.queue.core.windows.net?{SASToken}")
+ *     .buildAsyncClient();
+ * 
+ * * *

If the {@code endpoint} doesn't contain the queue name or {@code SAS token} they may be set using * {@link QueueClientBuilder#queueName(String) queueName} and {@link QueueClientBuilder#sasToken(String) SAS token}.

* *

Instantiating a synchronous Queue Client with credential

- * {@codesnippet com.azure.storage.queue.queueClient.instantiation.credential} + * + *
+ * QueueClient client = new QueueClientBuilder()
+ *     .endpoint("https://${accountName}.queue.core.windows.net")
+ *     .queueName("myqueue")
+ *     .sasToken("{SASTokenQueryParams}")
+ *     .buildClient();
+ * 
+ * * *

Instantiating an Asynchronous Queue Client with credential

- * {@codesnippet com.azure.storage.queue.queueAsyncClient.instantiation.credential} + * + *
+ * QueueAsyncClient queueAsyncClient = new QueueClientBuilder()
+ *     .endpoint("https://{accountName}.queue.core.windows.net")
+ *     .queueName("myqueue")
+ *     .sasToken("{SASTokenQueryParams}")
+ *     .buildAsyncClient();
+ * 
+ * * *

Another way to authenticate the client is using a {@link StorageSharedKeyCredential}. To create a * StorageSharedKeyCredential a connection string from the Storage Queue service must be used. @@ -66,10 +94,26 @@ * when authorizing requests sent to the service.

* *

Instantiating a synchronous Queue Client with connection string.

- * {@codesnippet com.azure.storage.queue.queueClient.instantiation.connectionstring} + * + *
+ * String connectionString = "DefaultEndpointsProtocol=https;AccountName={name};"
+ *     + "AccountKey={key};EndpointSuffix={core.windows.net}";
+ * QueueClient client = new QueueClientBuilder()
+ *     .connectionString(connectionString)
+ *     .buildClient();
+ * 
+ * * *

Instantiating an Asynchronous Queue Client with connection string.

- * {@codesnippet com.azure.storage.queue.queueAsyncClient.instantiation.connectionstring} + * + *
+ * String connectionString = "DefaultEndpointsProtocol=https;AccountName={name};"
+ *     + "AccountKey={key};EndpointSuffix={core.windows.net}";
+ * QueueAsyncClient queueAsyncClient = new QueueClientBuilder()
+ *     .connectionString(connectionString)
+ *     .buildAsyncClient();
+ * 
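The class documentation above also mentions authenticating with a StorageSharedKeyCredential derived from a connection string; a minimal sketch of that path (not part of this patch, the endpoint and queue name are placeholders):

    // Sketch only: parse the shared key from the connection string and pass it to the builder.
    StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(connectionString);
    QueueClient sharedKeyClient = new QueueClientBuilder()
        .endpoint("https://{accountName}.queue.core.windows.net")
        .queueName("myqueue")
        .credential(credential)
        .buildClient();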
+ * * * @see QueueClient * @see QueueAsyncClient @@ -434,7 +478,38 @@ public QueueClientBuilder messageEncoding(QueueMessageEncoding messageEncoding) * handler itself. *

Code Samples

* - * {@codesnippet com.azure.storage.queue.QueueClientBuilder#processMessageDecodingErrorAsyncHandler} + * + *
+     * String connectionString = "DefaultEndpointsProtocol=https;AccountName={name};"
+     *     + "AccountKey={key};EndpointSuffix={core.windows.net}";
+     *
+     * Function<QueueMessageDecodingError, Mono<Void>> processMessageDecodingErrorHandler =
+     *     (queueMessageDecodingFailure) -> {
+     *         QueueMessageItem queueMessageItem = queueMessageDecodingFailure.getQueueMessageItem();
+     *         PeekedMessageItem peekedMessageItem = queueMessageDecodingFailure.getPeekedMessageItem();
+     *         if (queueMessageItem != null) {
+     *             System.out.printf("Received badly encoded message, messageId=%s, messageBody=%s",
+     *                 queueMessageItem.getMessageId(),
+     *                 queueMessageItem.getBody().toString());
+     *             return queueMessageDecodingFailure
+     *                 .getQueueAsyncClient()
+     *                 .deleteMessage(queueMessageItem.getMessageId(), queueMessageItem.getPopReceipt());
+     *         } else if (peekedMessageItem != null) {
+     *             System.out.printf("Peeked badly encoded message, messageId=%s, messageBody=%s",
+     *                 peekedMessageItem.getMessageId(),
+     *                 peekedMessageItem.getBody().toString());
+     *             return Mono.empty();
+     *         } else {
+     *             return Mono.empty();
+     *         }
+     *     };
+     *
+     * QueueClient client = new QueueClientBuilder()
+     *     .connectionString(connectionString)
+     *     .processMessageDecodingErrorAsync(processMessageDecodingErrorHandler)
+     *     .buildClient();
+     * 
+ * * * @param processMessageDecodingErrorAsyncHandler the handler. * @return the updated QueueClientBuilder object @@ -462,7 +537,35 @@ public QueueClientBuilder processMessageDecodingErrorAsync( * handler itself. *
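The handler above is only invoked when a received or peeked message cannot be decoded with the QueueMessageEncoding configured on the builder; for context, a minimal sketch (not part of this patch) of opting into Base64 message encoding:

    // Sketch only: messages sent and received by this client are Base64 encoded/decoded.
    QueueClient base64Client = new QueueClientBuilder()
        .connectionString(connectionString)
        .messageEncoding(QueueMessageEncoding.BASE64)
        .buildClient();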

Code Samples

* - * {@codesnippet com.azure.storage.queue.QueueClientBuilder#processMessageDecodingErrorHandler} + * + *
+     * String connectionString = "DefaultEndpointsProtocol=https;AccountName={name};"
+     *     + "AccountKey={key};EndpointSuffix={core.windows.net}";
+     *
+     * Consumer<QueueMessageDecodingError> processMessageDecodingErrorHandler =
+     *     (queueMessageDecodingFailure) -> {
+     *         QueueMessageItem queueMessageItem = queueMessageDecodingFailure.getQueueMessageItem();
+     *         PeekedMessageItem peekedMessageItem = queueMessageDecodingFailure.getPeekedMessageItem();
+     *         if (queueMessageItem != null) {
+     *             System.out.printf("Received badly encoded message, messageId=%s, messageBody=%s",
+     *                 queueMessageItem.getMessageId(),
+     *                 queueMessageItem.getBody().toString());
+     *             queueMessageDecodingFailure
+     *                 .getQueueClient()
+     *                 .deleteMessage(queueMessageItem.getMessageId(), queueMessageItem.getPopReceipt());
+     *         } else if (peekedMessageItem != null) {
+     *             System.out.printf("Peeked badly encoded message, messageId=%s, messageBody=%s",
+     *                 peekedMessageItem.getMessageId(),
+     *                 peekedMessageItem.getBody().toString());
+     *         }
+     *     };
+     *
+     * QueueClient client = new QueueClientBuilder()
+     *     .connectionString(connectionString)
+     *     .processMessageDecodingError(processMessageDecodingErrorHandler)
+     *     .buildClient();
+     * 
+ * * * @param processMessageDecodingErrorHandler the handler. * @return the updated QueueClientBuilder object diff --git a/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueServiceAsyncClient.java b/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueServiceAsyncClient.java index 0c1c420ee8b7f..e8c2175a98bc0 100644 --- a/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueServiceAsyncClient.java +++ b/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueServiceAsyncClient.java @@ -51,7 +51,14 @@ * *

Instantiating an Asynchronous Queue Service Client

* - * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.instantiation} + * + *
+ * QueueServiceAsyncClient client = new QueueServiceClientBuilder()
+ *     .connectionString("connectionstring")
+ *     .endpoint("endpoint")
+ *     .buildAsyncClient();
+ * 
+ * * *

View {@link QueueServiceClientBuilder this} for additional ways to construct the client.

* @@ -132,7 +139,16 @@ public QueueAsyncClient getQueueAsyncClient(String queueName) { * *

Create the queue "test"

* - * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.createQueue#string} + * + *
+     * client.createQueue("myqueue").subscribe(
+     *     response -> {
+     *     },
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete creating the queue!")
+     * );
+     * 
+ * * * @param queueName Name of the queue * @return The {@link QueueAsyncClient QueueAsyncClient} @@ -155,7 +171,16 @@ public Mono createQueue(String queueName) { * *

Create the queue "test" with metadata "queue:metadata"

* - * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.createQueueWithResponse#string-map} + * + *
+     * client.createQueueWithResponse("myqueue", Collections.singletonMap("queue", "metadata"))
+     *     .subscribe(
+     *         response -> System.out.printf("Creating the queue with status code %d", response.getStatusCode()),
+     *         error -> System.err.print(error.toString()),
+     *         () -> System.out.println("Complete creating the queue!")
+     *     );
+     * 
+ * * * @param queueName Name of the queue * @param metadata Metadata to associate with the queue. If there is leading or trailing whitespace in any @@ -189,7 +214,13 @@ Mono> createQueueWithResponse(String queueName, MapDelete the queue "test"

* - * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.deleteQueue#string} + * + *
+     * client.deleteQueue("myshare").subscribe(
+     *     response -> System.out.println("Deleting the queue completed.")
+     * );
+     * 
+ * * * @param queueName Name of the queue * @return An empty response @@ -211,7 +242,13 @@ public Mono deleteQueue(String queueName) { * *

Delete the queue "test"

* - * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.deleteQueueWithResponse#string} + * + *
+     * client.deleteQueueWithResponse("myshare").subscribe(
+     *     response -> System.out.println("Deleting the queue completed with status code: " + response.getStatusCode())
+     * );
+     * 
+ * * * @param queueName Name of the queue * @return A response that only contains headers and response status code @@ -239,7 +276,15 @@ Mono> deleteQueueWithResponse(String queueName, Context context) * *

List all queues in the account

* - * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.listQueues} + * + *
+     * client.listQueues().subscribe(
+     *     queueItem -> System.out.printf("Queue %s exists in the account", queueItem.getName()),
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete listing the queues!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -265,7 +310,16 @@ public PagedFlux listQueues() { * *

List all queues that begin with "azure"

* - * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.listQueues#queueSergmentOptions} + * + *
+     * client.listQueues(new QueuesSegmentOptions().setPrefix("azure")).subscribe(
+     *     queueItem -> System.out.printf("Queue %s exists in the account and has metadata %s",
+     *         queueItem.getName(), queueItem.getMetadata()),
+     *     error -> System.err.print(error.toString()),
+     *     () -> System.out.println("Complete listing the queues!")
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -324,7 +378,15 @@ PagedFlux listQueuesWithOptionalTimeout(String marker, QueuesSegmentO * *

Retrieve Queue service properties

* - * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.getProperties} + * + *
+     * client.getProperties()
+     *     .subscribe(properties -> {
+     *         System.out.printf("Hour metrics enabled: %b, Minute metrics enabled: %b",
+     *             properties.getHourMetrics().isEnabled(), properties.getMinuteMetrics().isEnabled());
+     *     });
+     * 
+ * * *

For more information, see the * Azure @@ -349,7 +411,16 @@ public Mono getProperties() { * *

Retrieve Queue service properties

* - * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.getPropertiesWithResponse} + * + *
+     * client.getPropertiesWithResponse()
+     *     .subscribe(response -> {
+     *         QueueServiceProperties properties = response.getValue();
+     *         System.out.printf("Hour metrics enabled: %b, Minute metrics enabled: %b",
+     *             properties.getHourMetrics().isEnabled(), properties.getMinuteMetrics().isEnabled());
+     *     });
+     * 
+ * * *

For more information, see the * Azure @@ -385,11 +456,25 @@ Mono> getPropertiesWithResponse(Context context * *

Clear CORS in the Queue service

* - * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.setProperties#QueueServiceProperties} + * + *
+     * QueueServiceProperties properties = client.getProperties().block();
+     * properties.setCors(Collections.emptyList());
+     * client.setProperties(properties)
+     *     .doOnSuccess(response -> System.out.println("Setting Queue service properties completed."))
+     *     .subscribe();
+     * 
+ * * *

Enable Minute and Hour Metrics

* - * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.setPropertiesEnableMetrics#QueueServiceProperties} + * + *
+     * QueueServiceProperties properties = client.getProperties().block();
+     * properties.getMinuteMetrics().setEnabled(true);
+     * properties.getHourMetrics().setEnabled(true);
+     * client.setProperties(properties).subscribe(
+     *     response -> System.out.println("Setting Queue service properties completed."));
+     * 
+ * * *

For more information, see the * Azure @@ -431,11 +516,27 @@ public Mono setProperties(QueueServiceProperties properties) { * *

Clear CORS in the Queue service

* - * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.setPropertiesWithResponse#QueueServiceProperties} + * + *
+     * QueueServiceProperties properties = client.getProperties().block();
+     * properties.setCors(Collections.emptyList());
+     * client.setPropertiesWithResponse(properties)
+     *     .subscribe(response -> System.out.printf("Setting Queue service properties completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

Enable Minute and Hour Metrics

* - * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.setPropertiesWithResponseEnableMetrics#QueueServiceProperties} + * + *
+     * QueueServiceProperties properties = client.getProperties().block();
+     * properties.getMinuteMetrics().setEnabled(true);
+     * properties.getHourMetrics().setEnabled(true);
+     * client.setPropertiesWithResponse(properties)
+     *     .subscribe(response -> System.out.printf("Setting Queue service properties completed with status code %d",
+     *         response.getStatusCode()));
+     * 
+ * * *

For more information, see the * Azure @@ -479,7 +580,15 @@ Mono> setPropertiesWithResponse(QueueServiceProperties properties * *

Retrieve the geo replication information

* - * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.getStatistics} + * + *
+     * client.getStatistics()
+     *     .subscribe(stats -> {
+     *         System.out.printf("Geo replication status: %s, Last synced: %s",
+     *             stats.getGeoReplication().getStatus(), stats.getGeoReplication().getLastSyncTime());
+     *     });
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -502,7 +611,16 @@ public Mono getStatistics() { * *

Retrieve the geo replication information

* - * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.getStatisticsWithResponse} + * + *
+     * client.getStatisticsWithResponse()
+     *     .subscribe(response -> {
+     *         QueueServiceStatistics stats = response.getValue();
+     *         System.out.printf("Geo replication status: %s, Last synced: %s",
+     *             stats.getGeoReplication().getStatus(), stats.getGeoReplication().getLastSyncTime());
+     *     });
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -551,7 +669,22 @@ public HttpPipeline getHttpPipeline() { * *

The snippet below generates a SAS that lasts for two days and gives the user read and list access to * queues and file shares.

- * {@codesnippet com.azure.storage.queue.QueueServiceAsyncClient.generateAccountSas#AccountSasSignatureValues} + * + *
+     * AccountSasPermission permissions = new AccountSasPermission()
+     *     .setListPermission(true)
+     *     .setReadPermission(true);
+     * AccountSasResourceType resourceTypes = new AccountSasResourceType().setContainer(true).setObject(true);
+     * AccountSasService services = new AccountSasService().setQueueAccess(true).setFileAccess(true);
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plus(Duration.ofDays(2));
+     *
+     * AccountSasSignatureValues sasValues =
+     *     new AccountSasSignatureValues(expiryTime, permissions, services, resourceTypes);
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * String sas = queueServiceAsyncClient.generateAccountSas(sasValues);
+     * 
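+     *
+     * A minimal follow-up sketch (not part of the snippet above; {@code sasClient} is an illustrative name):
+     * the generated SAS can then authorize a separate client that holds no shared key credential.
+     *
+     * // Sketch only: reuse the SAS generated above with a new, otherwise unauthenticated client.
+     * QueueServiceAsyncClient sasClient = new QueueServiceClientBuilder()
+     *     .endpoint(queueServiceAsyncClient.getQueueServiceUrl())
+     *     .sasToken(sas)
+     *     .buildAsyncClient();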
+ * * * @param accountSasSignatureValues {@link AccountSasSignatureValues} * @@ -568,7 +701,22 @@ public String generateAccountSas(AccountSasSignatureValues accountSasSignatureVa * *

The snippet below generates a SAS that lasts for two days and gives the user read and list access to * queues and file shares.

- * {@codesnippet com.azure.storage.queue.QueueServiceAsyncClient.generateAccountSas#AccountSasSignatureValues-Context} + * + *
+     * AccountSasPermission permissions = new AccountSasPermission()
+     *     .setListPermission(true)
+     *     .setReadPermission(true);
+     * AccountSasResourceType resourceTypes = new AccountSasResourceType().setContainer(true).setObject(true);
+     * AccountSasService services = new AccountSasService().setQueueAccess(true).setFileAccess(true);
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plus(Duration.ofDays(2));
+     *
+     * AccountSasSignatureValues sasValues =
+     *     new AccountSasSignatureValues(expiryTime, permissions, services, resourceTypes);
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * String sas = queueServiceAsyncClient.generateAccountSas(sasValues, new Context("key", "value"));
+     * 
+ * * * @param accountSasSignatureValues {@link AccountSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. diff --git a/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueServiceClient.java b/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueServiceClient.java index 65e8bac36944b..0a80f1078fa81 100644 --- a/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueServiceClient.java +++ b/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueServiceClient.java @@ -31,7 +31,14 @@ * *

Instantiating a Synchronous Queue Service Client

* - * {@codesnippet com.azure.storage.queue.queueServiceClient.instantiation} + * + *
+ * QueueServiceClient client = new QueueServiceClientBuilder()
+ *     .connectionString("connectionstring")
+ *     .endpoint("endpoint")
+ *     .buildClient();
+ * 
+ * * *

View {@link QueueServiceClientBuilder this} for additional ways to construct the client.

* @@ -96,7 +103,12 @@ public QueueClient getQueueClient(String queueName) { * *

Create the queue "test"

* - * {@codesnippet com.azure.storage.queue.queueServiceClient.createQueue#string} + * + *
+     * client.createQueue("myqueue");
+     * System.out.println("Complete creating queue.");
+     * 
+ * * * @param queueName Name of the queue * @return A response containing the QueueClient and the status of creating the queue @@ -115,7 +127,13 @@ public QueueClient createQueue(String queueName) { * *

Create the queue "test" with metadata "queue:metadata"

* - * {@codesnippet com.azure.storage.queue.queueServiceClient.createQueueWithResponse#string-map-duration-context} + * + *
+     * Response<QueueClient> response = client.createQueueWithResponse("myqueue",
+     *     Collections.singletonMap("queue", "metadata"), Duration.ofSeconds(1), new Context(key1, value1));
+     * System.out.println("Complete creating queue with status code: " + response.getStatusCode());
+     * 
+ * * * @param queueName Name of the queue * @param metadata Metadata to associate with the queue. If there is leading or trailing whitespace in any @@ -143,7 +161,12 @@ public Response createQueueWithResponse(String queueName, MapDelete the queue "test"

* - * {@codesnippet com.azure.storage.queue.queueServiceClient.deleteQueue#string} + * + *
+     * client.deleteQueue("myqueue");
+     * System.out.println("Complete deleting the queue.");
+     * 
+ * * * @param queueName Name of the queue * @throws QueueStorageException If the queue doesn't exist @@ -160,7 +183,13 @@ public void deleteQueue(String queueName) { * *

Delete the queue "test"

* - * {@codesnippet com.azure.storage.queue.queueServiceClient.deleteQueueWithResponse#string-duration-context} + * + *
+     * Response<Void> response = client.deleteQueueWithResponse("myqueue", Duration.ofSeconds(1),
+     *     new Context(key1, value1));
+     * System.out.println("Complete deleting the queue with status code: " + response.getStatusCode());
+     * 
+ * * * @param queueName Name of the queue * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout @@ -183,7 +212,13 @@ public Response deleteQueueWithResponse(String queueName, Duration timeout * *

List all queues in the account

* - * {@codesnippet com.azure.storage.queue.queueServiceClient.listQueues} + * + *
+     * client.listQueues().forEach(
+     *     queueItem -> System.out.printf("Queue %s exists in the account", queueItem.getName())
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -205,7 +240,15 @@ public PagedIterable listQueues() { * *

List all queues that begin with "azure"

* - * {@codesnippet com.azure.storage.queue.queueServiceClient.listQueues#queueSergmentOptions-duration-context} + * + *
+     * client.listQueues(new QueuesSegmentOptions().setPrefix("azure"), Duration.ofSeconds(1),
+     *     new Context(key1, value1)).forEach(
+     *         queueItem -> System.out.printf("Queue %s exists in the account and has metadata %s",
+     *         queueItem.getName(), queueItem.getMetadata())
+     * );
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -251,7 +294,13 @@ PagedIterable listQueues(String marker, QueuesSegmentOptions options, * *

Retrieve Queue service properties

* - * {@codesnippet com.azure.storage.queue.queueServiceClient.getProperties} + * + *
+     * QueueServiceProperties properties = client.getProperties();
+     * System.out.printf("Hour metrics enabled: %b, Minute metrics enabled: %b",
+     *     properties.getHourMetrics().isEnabled(), properties.getMinuteMetrics().isEnabled());
+     * 
+ * * *

For more information, see the * Azure @@ -272,7 +321,14 @@ public QueueServiceProperties getProperties() { * *

Retrieve Queue service properties

* - * {@codesnippet com.azure.storage.queue.queueServiceClient.getPropertiesWithResponse#duration-context} + * + *
+     * QueueServiceProperties properties = client.getPropertiesWithResponse(Duration.ofSeconds(1),
+     *     new Context(key1, value1)).getValue();
+     * System.out.printf("Hour metrics enabled: %b, Minute metrics enabled: %b",
+     *     properties.getHourMetrics().isEnabled(), properties.getMinuteMetrics().isEnabled());
+     * 
+ * * *

For more information, see the * Azure @@ -302,11 +358,31 @@ public Response getPropertiesWithResponse(Duration timeo * *

Clear CORS in the Queue service

* - * {@codesnippet com.azure.storage.queue.queueServiceClient.setProperties#QueueServiceProperties} + * + *
+     * QueueServiceProperties properties = client.getProperties();
+     * properties.setCors(Collections.emptyList());
+     *
+     * client.setProperties(properties);
+     * System.out.println("Setting Queue service properties completed.");
+     * 
+ * * *

Enable Minute and Hour Metrics

* - * {@codesnippet com.azure.storage.queue.queueServiceClient.setPropertiesEnableMetrics#QueueServiceProperties} + * + *
+     * QueueServiceProperties properties = client.getProperties();
+     * properties.getMinuteMetrics().setEnabled(true);
+     * properties.getMinuteMetrics().setIncludeApis(true);
+     * properties.getMinuteMetrics().setRetentionPolicy(new QueueRetentionPolicy().setDays(7).setEnabled(true));
+     * properties.getHourMetrics().setEnabled(true);
+     * properties.getHourMetrics().setIncludeApis(true);
+     * properties.getHourMetrics().setRetentionPolicy(new QueueRetentionPolicy().setDays(7).setEnabled(true));
+     * client.setProperties(properties);
+     * System.out.println("Setting Queue service properties completed.");
+     * 
+ * * *

For more information, see the * Azure @@ -344,11 +420,32 @@ public void setProperties(QueueServiceProperties properties) { * *

Clear CORS in the Queue service

* - * {@codesnippet com.azure.storage.queue.queueServiceClient.setPropertiesWithResponse#QueueServiceProperties-duration-context} + * + *
+     * QueueServiceProperties properties = client.getProperties();
+     * properties.setCors(Collections.emptyList());
+     * Response<Void> response = client.setPropertiesWithResponse(properties, Duration.ofSeconds(1),
+     *     new Context(key1, value1));
+     * System.out.printf("Setting Queue service properties completed with status code %d", response.getStatusCode());
+     * 
+ * * *

Enable Minute and Hour Metrics

* - * {@codesnippet com.azure.storage.queue.queueServiceClient.setPropertiesWithResponseEnableMetrics#QueueServiceProperties-duration-context} + * + *
+     * QueueServiceProperties properties = client.getProperties();
+     * properties.getMinuteMetrics().setEnabled(true);
+     * properties.getMinuteMetrics().setIncludeApis(true);
+     * properties.getMinuteMetrics().setRetentionPolicy(new QueueRetentionPolicy().setDays(7).setEnabled(true));
+     * properties.getHourMetrics().setEnabled(true);
+     * properties.getHourMetrics().setIncludeApis(true);
+     * properties.getHourMetrics().setRetentionPolicy(new QueueRetentionPolicy().setDays(7).setEnabled(true));
+     * Response<Void> response = client.setPropertiesWithResponse(properties, Duration.ofSeconds(1),
+     *     new Context(key1, value1));
+     * System.out.printf("Setting Queue service properties completed with status code %d", response.getStatusCode());
+     * 
+ * * *

For more information, see the * Azure @@ -387,7 +484,13 @@ public Response setPropertiesWithResponse(QueueServiceProperties propertie * *

Retrieve the geo replication information

* - * {@codesnippet com.azure.storage.queue.queueServiceClient.getStatistics} + * + *
+     * QueueServiceStatistics stats = client.getStatistics();
+     * System.out.printf("Geo replication status: %s, Last synced: %s",
+     *     stats.getGeoReplication().getStatus(), stats.getGeoReplication().getLastSyncTime());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -406,7 +509,14 @@ public QueueServiceStatistics getStatistics() { * *

Retrieve the geo replication information

* - * {@codesnippet com.azure.storage.queue.queueServiceClient.getStatisticsWithResponse#duration-context} + * + *
+     * QueueServiceStatistics stats = client.getStatisticsWithResponse(Duration.ofSeconds(1),
+     *     new Context(key1, value1)).getValue();
+     * System.out.printf("Geo replication status: %s, Last synced: %s",
+     *     stats.getGeoReplication().getStatus(), stats.getGeoReplication().getLastSyncTime());
+     * 
+ * * *

For more information, see the * Azure Docs.

@@ -451,7 +561,22 @@ public HttpPipeline getHttpPipeline() { *

Generating an account SAS

*

The snippet below generates an AccountSasSignatureValues object that lasts for two days and gives the user * read and list access to queues and file shares.

- * {@codesnippet com.azure.storage.queue.QueueServiceClient.generateAccountSas#AccountSasSignatureValues} + * + *
+     * AccountSasPermission permissions = new AccountSasPermission()
+     *     .setListPermission(true)
+     *     .setReadPermission(true);
+     * AccountSasResourceType resourceTypes = new AccountSasResourceType().setContainer(true).setObject(true);
+     * AccountSasService services = new AccountSasService().setQueueAccess(true).setFileAccess(true);
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plus(Duration.ofDays(2));
+     *
+     * AccountSasSignatureValues sasValues =
+     *     new AccountSasSignatureValues(expiryTime, permissions, services, resourceTypes);
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * String sas = queueServiceClient.generateAccountSas(sasValues);
+     * 
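+     *
+     * A minimal follow-up sketch (not part of the snippet above; {@code sasClient} is an illustrative name):
+     * the generated SAS can then authorize a separate client that holds no shared key credential.
+     *
+     * // Sketch only: reuse the SAS generated above with a new, otherwise unauthenticated client.
+     * QueueServiceClient sasClient = new QueueServiceClientBuilder()
+     *     .endpoint(queueServiceClient.getQueueServiceUrl())
+     *     .sasToken(sas)
+     *     .buildClient();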
+ * * * @param accountSasSignatureValues {@link AccountSasSignatureValues} * @@ -469,7 +594,22 @@ public String generateAccountSas(AccountSasSignatureValues accountSasSignatureVa *

Generating an account SAS

*

The snippet below generates an AccountSasSignatureValues object that lasts for two days and gives the user * read and list access to queues and file shares.

- * {@codesnippet com.azure.storage.queue.QueueServiceClient.generateAccountSas#AccountSasSignatureValues-Context} + * + *
+     * AccountSasPermission permissions = new AccountSasPermission()
+     *     .setListPermission(true)
+     *     .setReadPermission(true);
+     * AccountSasResourceType resourceTypes = new AccountSasResourceType().setContainer(true).setObject(true);
+     * AccountSasService services = new AccountSasService().setQueueAccess(true).setFileAccess(true);
+     * OffsetDateTime expiryTime = OffsetDateTime.now().plus(Duration.ofDays(2));
+     *
+     * AccountSasSignatureValues sasValues =
+     *     new AccountSasSignatureValues(expiryTime, permissions, services, resourceTypes);
+     *
+     * // Client must be authenticated via StorageSharedKeyCredential
+     * String sas = queueServiceClient.generateAccountSas(sasValues, new Context("key", "value"));
+     * 
+ * * * @param accountSasSignatureValues {@link AccountSasSignatureValues} * @param context Additional context that is passed through the code when generating a SAS. diff --git a/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueServiceClientBuilder.java b/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueServiceClientBuilder.java index 15a19160ec614..3f5b6446e128f 100644 --- a/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueServiceClientBuilder.java +++ b/sdk/storage/azure-storage-queue/src/main/java/com/azure/storage/queue/QueueServiceClientBuilder.java @@ -42,19 +42,45 @@ * the builder the a SAS token that authorizes the client.

* *

Instantiating a synchronous Queue Service Client with SAS token

- * {@codesnippet com.azure.storage.queue.queueServiceClient.instantiation.sastoken} + * + *
+ * QueueServiceClient client = new QueueServiceClientBuilder()
+ *     .endpoint("https://${accountName}.queue.core.windows.net?${SASToken}")
+ *     .buildClient();
+ * 
+ * * *

Instantiating an Asynchronous Queue Service Client with SAS token

- * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.instantiation.sastoken} + * + *
+ * QueueServiceAsyncClient client = new QueueServiceClientBuilder()
+ *     .endpoint("https://{accountName}.queue.core.windows.net?{SASToken}")
+ *     .buildAsyncClient();
+ * 
+ * * *

If the {@code endpoint} doesn't contain the query parameters to construct a SAS token they may be set using * {@link #sasToken(String) sasToken} together with endpoint.

* *

Instantiating a synchronous Queue Service Client with SAS token

- * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.instantiation.credential} + * + *
+ * QueueServiceClient client = new QueueServiceClientBuilder()
+ *     .endpoint("https://{accountName}.queue.core.windows.net")
+ *     .sasToken("{SASTokenQueryParams}")
+ *     .buildClient();
+ * 
+ * * *

Instantiating an Asynchronous Queue Service Client with SAS token

- * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.instantiation.credential} + * + *
+ * QueueServiceAsyncClient client = new QueueServiceClientBuilder()
+ *     .endpoint("https://{accountName}.queue.core.windows.net")
+ *     .sasToken("{SASTokenQueryParams}")
+ *     .buildAsyncClient();
+ * 
+ * * *

Another way to authenticate the client is using a {@link StorageSharedKeyCredential}. To create a * StorageSharedKeyCredential a connection string from the Storage Queue service must be used. @@ -63,10 +89,26 @@ * when authorizing requests sent to the service.

* *
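+ *
+ * As a minimal sketch of the credential route described above (the connection string value and client name are
+ * placeholders, not values from this codebase), the credential can also be passed to the builder directly; the
+ * connection string samples below are the equivalent shorthand.
+ *
+ * // Sketch only: build the shared key credential from a connection string and hand it to the builder.
+ * StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
+ *     "DefaultEndpointsProtocol=https;AccountName={name};AccountKey={key};EndpointSuffix={core.windows.net}");
+ * QueueServiceClient client = new QueueServiceClientBuilder()
+ *     .endpoint("https://{accountName}.queue.core.windows.net")
+ *     .credential(credential)
+ *     .buildClient();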

Instantiating a synchronous Queue Service Client with connection string.

- * {@codesnippet com.azure.storage.queue.queueServiceClient.instantiation.connectionstring} + * + *
+ * String connectionString = "DefaultEndpointsProtocol=https;AccountName={name};"
+ *     + "AccountKey={key};EndpointSuffix={core.windows.net}";
+ * QueueServiceClient client = new QueueServiceClientBuilder()
+ *     .connectionString(connectionString)
+ *     .buildClient();
+ * 
+ * * *

Instantiating an Asynchronous Queue Service Client with connection string.

- * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.instantiation.connectionstring} + * + *
+ * String connectionString = "DefaultEndpointsProtocol=https;AccountName={name};"
+ *     + "AccountKey={key};EndpointSuffix={core.windows.net}";
+ * QueueServiceAsyncClient client = new QueueServiceClientBuilder()
+ *     .connectionString(connectionString)
+ *     .buildAsyncClient();
+ * 
+ * * * @see QueueServiceClient * @see QueueServiceAsyncClient @@ -416,7 +458,38 @@ public QueueServiceClientBuilder messageEncoding(QueueMessageEncoding messageEnc * {@link QueueServiceAsyncClient} built by this builder. *

Code Samples

* - * {@codesnippet com.azure.storage.queue.QueueServiceClientBuilder#processMessageDecodingErrorAsyncHandler} + * + *
+     * String connectionString = "DefaultEndpointsProtocol=https;AccountName={name};"
+     *     + "AccountKey={key};EndpointSuffix={core.windows.net}";
+     *
+     * Function<QueueMessageDecodingError, Mono<Void>> processMessageDecodingErrorHandler =
+     *     (queueMessageDecodingFailure) -> {
+     *         QueueMessageItem queueMessageItem = queueMessageDecodingFailure.getQueueMessageItem();
+     *         PeekedMessageItem peekedMessageItem = queueMessageDecodingFailure.getPeekedMessageItem();
+     *         if (queueMessageItem != null) {
+     *             System.out.printf("Received badly encoded message, messageId=%s, messageBody=%s",
+     *                 queueMessageItem.getMessageId(),
+     *                 queueMessageItem.getBody().toString());
+     *             return queueMessageDecodingFailure
+     *                 .getQueueAsyncClient()
+     *                 .deleteMessage(queueMessageItem.getMessageId(), queueMessageItem.getPopReceipt());
+     *         } else if (peekedMessageItem != null) {
+     *             System.out.printf("Peeked badly encoded message, messageId=%s, messageBody=%s",
+     *                 peekedMessageItem.getMessageId(),
+     *                 peekedMessageItem.getBody().toString());
+     *             return Mono.empty();
+     *         } else {
+     *             return Mono.empty();
+     *         }
+     *     };
+     *
+     * QueueServiceClient client = new QueueServiceClientBuilder()
+     *     .connectionString(connectionString)
+     *     .processMessageDecodingErrorAsync(processMessageDecodingErrorHandler)
+     *     .buildClient();
+     * 
+ * * * @param processMessageDecodingErrorAsyncHandler the handler. * @return the updated QueueServiceClientBuilder object @@ -447,7 +520,35 @@ public QueueServiceClientBuilder processMessageDecodingErrorAsync( * {@link QueueServiceAsyncClient} built by this builder. *

Code Samples

* - * {@codesnippet com.azure.storage.queue.QueueServiceClientBuilder#processMessageDecodingErrorHandler} + * + *
+     * String connectionString = "DefaultEndpointsProtocol=https;AccountName={name};"
+     *     + "AccountKey={key};EndpointSuffix={core.windows.net}";
+     *
+     * Consumer<QueueMessageDecodingError> processMessageDecodingErrorHandler =
+     *     (queueMessageDecodingFailure) -> {
+     *         QueueMessageItem queueMessageItem = queueMessageDecodingFailure.getQueueMessageItem();
+     *         PeekedMessageItem peekedMessageItem = queueMessageDecodingFailure.getPeekedMessageItem();
+     *         if (queueMessageItem != null) {
+     *             System.out.printf("Received badly encoded message, messageId=%s, messageBody=%s",
+     *                 queueMessageItem.getMessageId(),
+     *                 queueMessageItem.getBody().toString());
+     *             queueMessageDecodingFailure
+     *                 .getQueueClient()
+     *                 .deleteMessage(queueMessageItem.getMessageId(), queueMessageItem.getPopReceipt());
+     *         } else if (peekedMessageItem != null) {
+     *             System.out.printf("Peeked badly encoded message, messageId=%s, messageBody=%s",
+     *                 peekedMessageItem.getMessageId(),
+     *                 peekedMessageItem.getBody().toString());
+     *         }
+     *     };
+     *
+     * QueueServiceClient client = new QueueServiceClientBuilder()
+     *     .connectionString(connectionString)
+     *     .processMessageDecodingError(processMessageDecodingErrorHandler)
+     *     .buildClient();
+     * 
+ * * * @param processMessageDecodingErrorHandler the handler. * @return the updated QueueServiceClientBuilder object diff --git a/sdk/storage/azure-storage-queue/src/samples/java/com/azure/storage/queue/ReadmeSamples.java b/sdk/storage/azure-storage-queue/src/samples/java/com/azure/storage/queue/ReadmeSamples.java index 0d79626b2c779..ff7f10c01d4c4 100644 --- a/sdk/storage/azure-storage-queue/src/samples/java/com/azure/storage/queue/ReadmeSamples.java +++ b/sdk/storage/azure-storage-queue/src/samples/java/com/azure/storage/queue/ReadmeSamples.java @@ -25,6 +25,7 @@ * * Code samples for the README.md */ +@SuppressWarnings("unused") public class ReadmeSamples { private static final String ACCOUNT_NAME = System.getenv("AZURE_STORAGE_ACCOUNT_NAME"); private static final String SAS_TOKEN = System.getenv("PRIMARY_SAS_TOKEN"); @@ -47,83 +48,100 @@ public class ReadmeSamples { } }; - private Logger logger = LoggerFactory.getLogger(ReadmeSamples.class); - - public void getQueueServiceClient1() { - // Only one "?" is needed here. If the sastoken starts with "?", please removing one "?". - String queueServiceURL = String.format("https://%s.queue.core.windows.net/?%s", ACCOUNT_NAME, SAS_TOKEN); - QueueServiceClient queueServiceClient = new QueueServiceClientBuilder().endpoint(queueServiceURL).buildClient(); - } - - public void getQueueServiceClient2() { - String queueServiceURL = String.format("https://%s.queue.core.windows.net", ACCOUNT_NAME); - QueueServiceClient queueServiceClient = new QueueServiceClientBuilder().endpoint(queueServiceURL) - .sasToken(SAS_TOKEN).buildClient(); - } + private final Logger logger = LoggerFactory.getLogger(ReadmeSamples.class); public void handleException() { + // BEGIN: readme-sample-handleException String queueServiceURL = String.format("https://%s.queue.core.windows.net", ACCOUNT_NAME); QueueServiceClient queueServiceClient = new QueueServiceClientBuilder().endpoint(queueServiceURL) - .sasToken(SAS_TOKEN).buildClient(); + .sasToken(SAS_TOKEN).buildClient(); try { queueServiceClient.createQueue("myQueue"); } catch (QueueStorageException e) { logger.error("Failed to create a queue with error code: " + e.getErrorCode()); } + // END: readme-sample-handleException } public void createQueue1() { + // BEGIN: readme-sample-createQueue1 String queueServiceURL = String.format("https://%s.queue.core.windows.net", ACCOUNT_NAME); QueueServiceClient queueServiceClient = new QueueServiceClientBuilder().endpoint(queueServiceURL) - .sasToken(SAS_TOKEN).buildClient(); + .sasToken(SAS_TOKEN).buildClient(); QueueClient newQueueClient = queueServiceClient.createQueue("myQueue"); + // END: readme-sample-createQueue1 } public void createQueue2() { + // BEGIN: readme-sample-createQueue2 String queueServiceAsyncURL = String.format("https://%s.queue.core.windows.net/", ACCOUNT_NAME); QueueServiceAsyncClient queueServiceAsyncClient = new QueueServiceClientBuilder().endpoint(queueServiceAsyncURL) - .sasToken(SAS_TOKEN).buildAsyncClient(); + .sasToken(SAS_TOKEN).buildAsyncClient(); queueServiceAsyncClient.createQueue("newAsyncQueue").subscribe(result -> { // do something when new queue created }, error -> { // do something if something wrong happened - }, () -> { + }, () -> { // completed, do something - }); + }); + // END: readme-sample-createQueue2 } public void createWithResponse1() { + // BEGIN: readme-sample-createWithResponse1 String queueURL = String.format("https://%s.queue.core.windows.net/%s", ACCOUNT_NAME, queueName); QueueClient queueClient = new 
QueueClientBuilder().endpoint(queueURL).sasToken(SAS_TOKEN).buildClient(); // metadata is map of key-value pair queueClient.createWithResponse(metadata, Duration.ofSeconds(30), Context.NONE); + // END: readme-sample-createWithResponse1 } public void createWithResponse2() { + // BEGIN: readme-sample-createWithResponse2 // Only one "?" is needed here. If the sastoken starts with "?", please removing one "?". String queueAsyncURL = String.format("https://%s.queue.core.windows.net/%s?%s", ACCOUNT_NAME, queueAsyncName, - SAS_TOKEN); + SAS_TOKEN); QueueAsyncClient queueAsyncClient = new QueueClientBuilder().endpoint(queueAsyncURL).buildAsyncClient(); queueAsyncClient.createWithResponse(metadata).subscribe(result -> { // do something when new queue created }, error -> { // do something if something wrong happened - }, () -> { + }, () -> { // completed, do something - }); + }); + // END: readme-sample-createWithResponse2 + } + + public void getQueueServiceClient1() { + // BEGIN: readme-sample-getQueueServiceClient1 + // Only one "?" is needed here. If the sastoken starts with "?", please removing one "?". + String queueServiceURL = String.format("https://%s.queue.core.windows.net/?%s", ACCOUNT_NAME, SAS_TOKEN); + QueueServiceClient queueServiceClient = new QueueServiceClientBuilder().endpoint(queueServiceURL).buildClient(); + // END: readme-sample-getQueueServiceClient1 + } + + public void getQueueServiceClient2() { + // BEGIN: readme-sample-getQueueServiceClient2 + String queueServiceURL = String.format("https://%s.queue.core.windows.net", ACCOUNT_NAME); + QueueServiceClient queueServiceClient = new QueueServiceClientBuilder().endpoint(queueServiceURL) + .sasToken(SAS_TOKEN).buildClient(); + // END: readme-sample-getQueueServiceClient2 } public void deleteQueue() { + // BEGIN: readme-sample-deleteQueue String queueServiceURL = String.format("https://%s.queue.core.windows.net", ACCOUNT_NAME); QueueServiceClient queueServiceClient = new QueueServiceClientBuilder().endpoint(queueServiceURL) .sasToken(SAS_TOKEN).buildClient(); queueServiceClient.deleteQueue("myqueue"); + // END: readme-sample-deleteQueue } public void getQueueListInAccount() { + // BEGIN: readme-sample-getQueueListInAccount String queueServiceURL = String.format("https://%s.queue.core.windows.net", ACCOUNT_NAME); QueueServiceClient queueServiceClient = new QueueServiceClientBuilder().endpoint(queueServiceURL) .sasToken(SAS_TOKEN).buildClient(); @@ -131,20 +149,23 @@ public void getQueueListInAccount() { // @param options: Filter for queue selection // @param timeout: An optional timeout applied to the operation. // @param context: Additional context that is passed through the Http pipeline during the service call. 
- queueServiceClient.listQueues(options, timeout, context).stream().forEach(queueItem -> { - System.out.printf("Queue %s exists in the account.", queueItem.getName()); - }); + queueServiceClient.listQueues(options, timeout, context).stream().forEach(queueItem -> + System.out.printf("Queue %s exists in the account.", queueItem.getName())); + // END: readme-sample-getQueueListInAccount } public void getPropertiesInQueueAccount() { + // BEGIN: readme-sample-getPropertiesInQueueAccount String queueServiceURL = String.format("https://%s.queue.core.windows.net", ACCOUNT_NAME); QueueServiceClient queueServiceClient = new QueueServiceClientBuilder().endpoint(queueServiceURL) .sasToken(SAS_TOKEN).buildClient(); QueueServiceProperties properties = queueServiceClient.getProperties(); + // END: readme-sample-getPropertiesInQueueAccount } public void setPropertiesInQueueAccount() { + // BEGIN: readme-sample-setPropertiesInQueueAccount String queueServiceURL = String.format("https://%s.queue.core.windows.net", ACCOUNT_NAME); QueueServiceClient queueServiceClient = new QueueServiceClientBuilder().endpoint(queueServiceURL) .sasToken(SAS_TOKEN).buildClient(); @@ -152,25 +173,31 @@ public void setPropertiesInQueueAccount() { QueueServiceProperties properties = queueServiceClient.getProperties(); properties.setCors(Collections.emptyList()); queueServiceClient.setProperties(properties); + // END: readme-sample-setPropertiesInQueueAccount } public void getQueueServiceStatistics() { + // BEGIN: readme-sample-getQueueServiceStatistics String queueServiceURL = String.format("https://%s.queue.core.windows.net", ACCOUNT_NAME); QueueServiceClient queueServiceClient = new QueueServiceClientBuilder().endpoint(queueServiceURL) .sasToken(SAS_TOKEN).buildClient(); QueueServiceStatistics queueStats = queueServiceClient.getStatistics(); + // END: readme-sample-getQueueServiceStatistics } public void enqueueMessage() { + // BEGIN: readme-sample-enqueueMessage String queueURL = String.format("https://%s.queue.core.windows.net", ACCOUNT_NAME); QueueClient queueClient = new QueueClientBuilder().endpoint(queueURL).sasToken(SAS_TOKEN).queueName("myqueue") .buildClient(); queueClient.sendMessage("myMessage"); + // END: readme-sample-enqueueMessage } - public void updateMesage() { + public void updateMessage() { + // BEGIN: readme-sample-updateMessage String queueURL = String.format("https://%s.queue.core.windows.net", ACCOUNT_NAME); QueueClient queueClient = new QueueClientBuilder().endpoint(queueURL).sasToken(SAS_TOKEN).queueName("myqueue") .buildClient(); @@ -178,56 +205,62 @@ public void updateMesage() { // @param popReceipt: Unique identifier that must match the message for it to be updated // @param visibilityTimeout: How long the message will be invisible in the queue in seconds queueClient.updateMessage(messageId, popReceipt, "new message", visibilityTimeout); + // END: readme-sample-updateMessage } public void peekAtMessage() { + // BEGIN: readme-sample-peekAtMessage String queueURL = String.format("https://%s.queue.core.windows.net", ACCOUNT_NAME); QueueClient queueClient = new QueueClientBuilder().endpoint(queueURL).sasToken(SAS_TOKEN).queueName("myqueue") .buildClient(); // @param key: The key with which the specified value should be associated. // @param value: The value to be associated with the specified key. 
- queueClient.peekMessages(5, Duration.ofSeconds(1), new Context(key, value)).forEach(message -> { - System.out.println(message.getBody().toString()); - }); + queueClient.peekMessages(5, Duration.ofSeconds(1), new Context(key, value)).forEach(message -> + System.out.println(message.getBody().toString())); + // END: readme-sample-peekAtMessage } public void receiveMessageFromQueue() { + // BEGIN: readme-sample-receiveMessageFromQueue String queueURL = String.format("https://%s.queue.core.windows.net", ACCOUNT_NAME); QueueClient queueClient = new QueueClientBuilder().endpoint(queueURL).sasToken(SAS_TOKEN).queueName("myqueue") .buildClient(); - // Try to receive 10 mesages: Maximum number of messages to get - queueClient.receiveMessages(10).forEach(message -> { - System.out.println(message.getBody().toString()); - }); + // Try to receive 10 messages: Maximum number of messages to get + queueClient.receiveMessages(10).forEach(message -> + System.out.println(message.getBody().toString())); + // END: readme-sample-receiveMessageFromQueue } public void deleteMessageFromQueue() { + // BEGIN: readme-sample-deleteMessageFromQueue String queueURL = String.format("https://%s.queue.core.windows.net", ACCOUNT_NAME); QueueClient queueClient = new QueueClientBuilder().endpoint(queueURL).sasToken(SAS_TOKEN).queueName("myqueue") .buildClient(); queueClient.deleteMessage(messageId, popReceipt); + // END: readme-sample-deleteMessageFromQueue } public void getQueueProperties() { + // BEGIN: readme-sample-getQueueProperties String queueURL = String.format("https://%s.queue.core.windows.net", ACCOUNT_NAME); QueueClient queueClient = new QueueClientBuilder().endpoint(queueURL).sasToken(SAS_TOKEN).queueName("myqueue") .buildClient(); QueueProperties properties = queueClient.getProperties(); + // END: readme-sample-getQueueProperties } public void setQueueMetadata() { + // BEGIN: readme-sample-setQueueMetadata String queueURL = String.format("https://%s.queue.core.windows.net", ACCOUNT_NAME); QueueClient queueClient = new QueueClientBuilder().endpoint(queueURL).sasToken(SAS_TOKEN).queueName("myqueue") .buildClient(); - Map metadata = new HashMap() { - { - put("key1", "val1"); - put("key2", "val2"); - } - }; + Map metadata = new HashMap<>(); + metadata.put("key1", "val1"); + metadata.put("key2", "val2"); queueClient.setMetadata(metadata); + // END: readme-sample-setQueueMetadata } }