Added Close and RetainUncommittedData to DataLakeFileUploadOptions #15046

Merged
1 change: 1 addition & 0 deletions sdk/storage/Azure.Storage.Files.DataLake/CHANGELOG.md
@@ -4,6 +4,7 @@
- Fixed bug where Stream returned from DataLakeFileClient.OpenWrite() did not flush while disposing preventing compatibility with using keyword.
- Fixed bug where DataLakeFileClient.Upload() could not upload read-only files.
- Fixed bug where DataLakeBlobAccessPolicy.StartsOn and .ExpiresOn would cause the process to crash.
- Added Close and RetainUncommittedData to DataLakeFileUploadOptions.

## 12.4.0 (2020-08-31)
- Fixed bug where DataLakeFileClient.Upload() would deadlock if the content stream's position was not 0.
@@ -318,11 +318,13 @@ public DataLakeFileOpenWriteOptions() { }
public partial class DataLakeFileUploadOptions
{
public DataLakeFileUploadOptions() { }
public bool? Close { get { throw null; } set { } }
public Azure.Storage.Files.DataLake.Models.DataLakeRequestConditions Conditions { get { throw null; } set { } }
public Azure.Storage.Files.DataLake.Models.PathHttpHeaders HttpHeaders { get { throw null; } set { } }
public System.Collections.Generic.IDictionary<string, string> Metadata { get { throw null; } set { } }
public string Permissions { get { throw null; } set { } }
public System.IProgress<long> ProgressHandler { get { throw null; } set { } }
public bool? RetainUncommittedData { get { throw null; } set { } }
public Azure.Storage.StorageTransferOptions TransferOptions { get { throw null; } set { } }
public string Umask { get { throw null; } set { } }
}
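Note: a minimal usage sketch of the two new options (illustrative only, not part of this PR; it assumes an existing DataLakeFileClient and a readable content stream, mirroring the test added below):

using System.IO;
using System.Threading.Tasks;
using Azure.Storage.Files.DataLake;
using Azure.Storage.Files.DataLake.Models;

// Sketch only: "file" and "content" are assumed to be created elsewhere.
static async Task UploadWithNewOptionsAsync(DataLakeFileClient file, Stream content)
{
    var options = new DataLakeFileUploadOptions
    {
        // Raise the "stream closed" change notification on the final flush.
        Close = true,
        // Keep data staged past the flush position instead of discarding it.
        RetainUncommittedData = true
    };

    await file.UploadAsync(content, options: options);
}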
@@ -2042,7 +2042,7 @@ public virtual async Task<Response<PathInfo>> FlushAsync(
/// flush operation completes successfully, the service raises a file change notification with a property indicating that
/// this is the final update (the file stream has been closed). If "false" a change notification is raised indicating the
/// file has changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to indicate that
/// the file stream has been closed."
/// the file stream has been closed.
/// </param>
/// <param name="httpHeaders">
/// Optional standard HTTP header properties that can be set for the file.
@@ -4150,7 +4150,7 @@ private async Task<Stream> OpenWriteInternal(
}
#endregion OpenWrite

#region PartitionedUplaoder
#region PartitionedUploader
internal PartitionedUploader<DataLakeFileUploadOptions, PathInfo> GetPartitionedUploader(
StorageTransferOptions transferOptions,
ArrayPool<byte> arrayPool = null,
@@ -4196,8 +4196,8 @@ await client.AppendInternal(
// Flush data
return await client.FlushInternal(
position: newPosition,
retainUncommittedData: default,
close: default,
retainUncommittedData: args.RetainUncommittedData,
close: args.Close,
args.HttpHeaders,
args.Conditions,
async,
@@ -4223,8 +4223,8 @@ await client.AppendInternal(

return await client.FlushInternal(
offset + size,
retainUncommittedData: default,
close: default,
retainUncommittedData: args.RetainUncommittedData,
close: args.Close,
httpHeaders: args.HttpHeaders,
conditions: args.Conditions,
async,
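Note: before this change the uploader passed default for both values here; it now forwards them from the options. A rough caller-level equivalent of that final flush (a fragment, not part of this PR, assuming the current public AppendAsync/FlushAsync overloads and the same "file"/"content" as the sketch near the API listing above):

// Fragment; the real code path goes through FlushInternal as shown above.
await file.AppendAsync(content, offset: 0);
await file.FlushAsync(
    position: content.Length,
    retainUncommittedData: true,   // from DataLakeFileUploadOptions.RetainUncommittedData
    close: true);                  // from DataLakeFileUploadOptions.Close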
@@ -50,6 +50,26 @@ public class DataLakeFileUploadOptions
/// </summary>
public IProgress<long> ProgressHandler { get; set; }

/// <summary>
/// If "true", uncommitted data is retained after the flush operation completes; otherwise, the uncommitted data is deleted
/// after the flush operation. The default is false. Data at offsets less than the specified position are written to the
/// file when flush succeeds, but this optional parameter allows data after the flush position to be retained for a future
/// flush operation.
/// </summary>
public bool? RetainUncommittedData { get; set; }

/// <summary>
/// Azure Storage Events allow applications to receive notifications when files change. When Azure Storage Events are enabled,
/// a file changed event is raised. This event has a property indicating whether this is the final change to distinguish the
/// difference between an intermediate flush to a file stream and the final close of a file stream. The close query parameter
/// is valid only when the action is "flush" and change notifications are enabled. If the value of close is "true" and the
/// flush operation completes successfully, the service raises a file change notification with a property indicating that
/// this is the final update (the file stream has been closed). If "false" a change notification is raised indicating the
/// file has changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to indicate that
/// the file stream has been closed.
/// </summary>
public bool? Close { get; set; }
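Note: to make the RetainUncommittedData semantics above concrete, a hedged sketch of a manual two-stage flush (a fragment, not part of this PR; it assumes the same DataLakeFileClient "file" and usings as the sketch near the API listing above):

byte[] first = new byte[1024];
byte[] second = new byte[1024];

await file.AppendAsync(new MemoryStream(first), offset: 0);
await file.AppendAsync(new MemoryStream(second), offset: first.Length);

// Commit only the first block. The second block stays staged because
// retainUncommittedData is true; with the default (false) it would be discarded.
await file.FlushAsync(position: first.Length, retainUncommittedData: true);

// Later: commit everything and signal the final update (stream closed).
await file.FlushAsync(position: first.Length + second.Length, close: true);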

/// <summary>
/// Optional <see cref="StorageTransferOptions"/> to configure
/// parallel transfer behavior.
25 changes: 25 additions & 0 deletions sdk/storage/Azure.Storage.Files.DataLake/tests/FileClientTests.cs
@@ -2824,6 +2824,31 @@ await file.UploadAsync(
TestHelper.AssertSequenceEqual(expectedData, actual.ToArray());
}

[Test]
public async Task UploadAsync_CloseAndRetainData()
{
// Arrange
await using DisposingFileSystem test = await GetNewFileSystem();
DataLakeFileClient file = test.FileSystem.GetFileClient(GetNewFileName());

byte[] data = GetRandomBuffer(Constants.KB);

DataLakeFileUploadOptions options = new DataLakeFileUploadOptions
{
Close = true,
RetainUncommittedData = true
};

// Act
using Stream stream = new MemoryStream(data);
await file.UploadAsync(stream, options: options);

// Assert
using var actual = new MemoryStream();
await file.ReadToAsync(actual);
TestHelper.AssertSequenceEqual(data, actual.ToArray());
}

[Test]
[Ignore("Live tests will run out of memory")]
public async Task UploadAsync_FileLarge()