From 00ba8032381b894016ce958a8d2ce9a4c733312a Mon Sep 17 00:00:00 2001 From: Joel Hendrix Date: Mon, 12 Sep 2022 07:41:29 -0700 Subject: [PATCH] Fix various naming issues (#19067) * Fix various naming issues Use BlockSize and Concurrency consistenty across operations. Removed XMS prefix from ContentCRC64. Renamed list blobs API and its dependent types to include the word Flat. * renamed test vars --- sdk/storage/azblob/blob/client.go | 2 +- sdk/storage/azblob/blob/models.go | 12 +-- sdk/storage/azblob/blockblob/client.go | 4 +- sdk/storage/azblob/blockblob/client_test.go | 4 +- sdk/storage/azblob/blockblob/models.go | 29 +++--- sdk/storage/azblob/blockblob/responses.go | 6 +- sdk/storage/azblob/client.go | 4 +- sdk/storage/azblob/client_test.go | 90 +++++++++---------- sdk/storage/azblob/examples_test.go | 6 +- .../azblob/internal/shared/batch_transfer.go | 10 +-- sdk/storage/azblob/models.go | 4 +- sdk/storage/azblob/responses.go | 4 +- 12 files changed, 90 insertions(+), 85 deletions(-) diff --git a/sdk/storage/azblob/blob/client.go b/sdk/storage/azblob/blob/client.go index c2b14289d544..054a48063e50 100644 --- a/sdk/storage/azblob/blob/client.go +++ b/sdk/storage/azblob/blob/client.go @@ -302,7 +302,7 @@ func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOpt OperationName: "downloadBlobToWriterAt", TransferSize: count, ChunkSize: o.BlockSize, - Parallelism: o.Parallelism, + Concurrency: o.Concurrency, Operation: func(chunkStart int64, count int64, ctx context.Context) error { downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{ diff --git a/sdk/storage/azblob/blob/models.go b/sdk/storage/azblob/blob/models.go index a8e720401193..bc597049ffe1 100644 --- a/sdk/storage/azblob/blob/models.go +++ b/sdk/storage/azblob/blob/models.go @@ -104,8 +104,8 @@ type downloadOptions struct { CpkInfo *CpkInfo CpkScopeInfo *CpkScopeInfo - // Parallelism indicates the maximum number of blocks to download in parallel (0=default) - Parallelism uint16 
+ // Concurrency indicates the maximum number of blocks to download in parallel (0=default) + Concurrency uint16 // RetryReaderOptionsPerBlock is used when downloading each block. RetryReaderOptionsPerBlock RetryReaderOptions @@ -154,8 +154,8 @@ type DownloadBufferOptions struct { // CpkScopeInfo contains a group of parameters for client provided encryption scope. CpkScopeInfo *CpkScopeInfo - // Parallelism indicates the maximum number of blocks to download in parallel (0=default) - Parallelism uint16 + // Concurrency indicates the maximum number of blocks to download in parallel (0=default) + Concurrency uint16 // RetryReaderOptionsPerBlock is used when downloading each block. RetryReaderOptionsPerBlock RetryReaderOptions @@ -179,8 +179,8 @@ type DownloadFileOptions struct { CpkInfo *CpkInfo CpkScopeInfo *CpkScopeInfo - // Parallelism indicates the maximum number of blocks to download in parallel. The default value is 5. - Parallelism uint16 + // Concurrency indicates the maximum number of blocks to download in parallel. The default value is 5. + Concurrency uint16 // RetryReaderOptionsPerBlock is used when downloading each block. RetryReaderOptionsPerBlock RetryReaderOptions diff --git a/sdk/storage/azblob/blockblob/client.go b/sdk/storage/azblob/blockblob/client.go index a4d0a0501bb8..66a23f695317 100644 --- a/sdk/storage/azblob/blockblob/client.go +++ b/sdk/storage/azblob/blockblob/client.go @@ -325,7 +325,7 @@ func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, read if o.BlockSize < blob.DefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB o.BlockSize = blob.DefaultDownloadBlockSize } - // StageBlock will be called with blockSize blocks and a Parallelism of (BufferSize / BlockSize). + // StageBlock will be called with blockSize blocks and a Concurrency of (BufferSize / BlockSize). 
} } @@ -352,7 +352,7 @@ func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, read OperationName: "uploadFromReader", TransferSize: readerSize, ChunkSize: o.BlockSize, - Parallelism: o.Parallelism, + Concurrency: o.Concurrency, Operation: func(offset int64, count int64, ctx context.Context) error { // This function is called once per block. // It is passed this block's offset within the buffer and its count of bytes diff --git a/sdk/storage/azblob/blockblob/client_test.go b/sdk/storage/azblob/blockblob/client_test.go index d8bda1805ae7..c300da7d03c6 100644 --- a/sdk/storage/azblob/blockblob/client_test.go +++ b/sdk/storage/azblob/blockblob/client_test.go @@ -3188,8 +3188,8 @@ func (s *BlockBlobUnrecordedTestsSuite) TestUploadStreamToBlobProperties() { // Perform UploadStream _, err = bbClient.UploadStream(context.Background(), blobContentReader, &blockblob.UploadStreamOptions{ - BufferSize: bufferSize, - MaxBuffers: maxBuffers, + BlockSize: bufferSize, + Concurrency: maxBuffers, Metadata: testcommon.BasicMetadata, Tags: testcommon.BasicBlobTagsMap, HTTPHeaders: &testcommon.BasicHeaders, diff --git a/sdk/storage/azblob/blockblob/models.go b/sdk/storage/azblob/blockblob/models.go index 3df39da99596..1e93143642d8 100644 --- a/sdk/storage/azblob/blockblob/models.go +++ b/sdk/storage/azblob/blockblob/models.go @@ -193,8 +193,9 @@ type uploadFromReaderOptions struct { CpkInfo *blob.CpkInfo CpkScopeInfo *blob.CpkScopeInfo - // Parallelism indicates the maximum number of blocks to upload in parallel (0=default) - Parallelism uint16 + // Concurrency indicates the maximum number of blocks to upload in parallel (0=default) + Concurrency uint16 + // Optional header, Specifies the transactional crc64 for the body, to be validated by the service. TransactionalContentCRC64 *[]byte // Specify the transactional md5 for the body, to be validated by the service. 
@@ -244,13 +245,17 @@ func (o *uploadFromReaderOptions) getCommitBlockListOptions() *CommitBlockListOp // UploadStreamOptions provides set of configurations for UploadStream operation type UploadStreamOptions struct { // transferManager provides a transferManager that controls buffer allocation/reuse and - // concurrency. This overrides BufferSize and MaxBuffers if set. + // concurrency. This overrides BlockSize and Concurrency if set. transferManager shared.TransferManager transferMangerNotSet bool - // BufferSize sizes the buffer used to read data from source. If < 1 MiB, format to 1 MiB. - BufferSize int - // MaxBuffers defines the number of simultaneous uploads will be performed to upload the file. - MaxBuffers int + + // BlockSize defines the size of the buffer used during upload. The default and minimum value is 1 MiB. + BlockSize int + + // Concurrency defines the number of concurrent uploads to be performed to upload the file. + // Each concurrent upload will create a buffer of size BlockSize. The default value is one. 
+ Concurrency int + HTTPHeaders *blob.HTTPHeaders Metadata map[string]string AccessConditions *blob.AccessConditions @@ -265,16 +270,16 @@ func (u *UploadStreamOptions) format() error { return nil } - if u.MaxBuffers == 0 { - u.MaxBuffers = 1 + if u.Concurrency == 0 { + u.Concurrency = 1 } - if u.BufferSize < _1MiB { - u.BufferSize = _1MiB + if u.BlockSize < _1MiB { + u.BlockSize = _1MiB } var err error - u.transferManager, err = shared.NewStaticBuffer(u.BufferSize, u.MaxBuffers) + u.transferManager, err = shared.NewStaticBuffer(u.BlockSize, u.Concurrency) if err != nil { return fmt.Errorf("bug: default transfer manager could not be created: %s", err) } diff --git a/sdk/storage/azblob/blockblob/responses.go b/sdk/storage/azblob/blockblob/responses.go index 3024cd463694..dab1873b1a9d 100644 --- a/sdk/storage/azblob/blockblob/responses.go +++ b/sdk/storage/azblob/blockblob/responses.go @@ -63,9 +63,9 @@ type uploadFromReaderResponse struct { // VersionID contains the information returned from the x-ms-version-id header response. VersionID *string - // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. 
// Will be a part of response only if uploading data >= internal.MaxUploadBlobBytes (= 256 * 1024 * 1024 // 256MB) - XMSContentCRC64 []byte + ContentCRC64 []byte } func toUploadReaderAtResponseFromUploadResponse(resp UploadResponse) uploadFromReaderResponse { @@ -97,7 +97,7 @@ func toUploadReaderAtResponseFromCommitBlockListResponse(resp CommitBlockListRes RequestID: resp.RequestID, Version: resp.Version, VersionID: resp.VersionID, - XMSContentCRC64: resp.XMSContentCRC64, + ContentCRC64: resp.XMSContentCRC64, } } diff --git a/sdk/storage/azblob/client.go b/sdk/storage/azblob/client.go index 94be36965858..c3cd9d02925a 100644 --- a/sdk/storage/azblob/client.go +++ b/sdk/storage/azblob/client.go @@ -111,10 +111,10 @@ func (c *Client) DeleteBlob(ctx context.Context, containerName string, blobName return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).Delete(ctx, o) } -// NewListBlobsPager returns a pager for blobs starting from the specified Marker. Use an empty +// NewListBlobsFlatPager returns a pager for blobs starting from the specified Marker. Use an empty // Marker to start enumeration from the beginning. Blob names are returned in lexicographic order. // For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs. 
-func (c *Client) NewListBlobsPager(containerName string, o *ListBlobsOptions) *runtime.Pager[ListBlobsResponse] { +func (c *Client) NewListBlobsFlatPager(containerName string, o *ListBlobsFlatOptions) *runtime.Pager[ListBlobsFlatResponse] { return c.svc.NewContainerClient(containerName).NewListBlobsFlatPager(o) } diff --git a/sdk/storage/azblob/client_test.go b/sdk/storage/azblob/client_test.go index 8a8048fd3df9..417fc9a068b8 100644 --- a/sdk/storage/azblob/client_test.go +++ b/sdk/storage/azblob/client_test.go @@ -92,7 +92,7 @@ func performUploadStreamToBlockBlobTest(t *testing.T, _require *require.Assertio // Perform UploadStream _, err = client.UploadStream(ctx, containerName, blobName, blobContentReader, - &blockblob.UploadStreamOptions{BufferSize: bufferSize, MaxBuffers: maxBuffers}) + &blockblob.UploadStreamOptions{BlockSize: bufferSize, Concurrency: maxBuffers}) // Assert that upload was successful _require.Equal(err, nil) @@ -160,7 +160,7 @@ func (s *AZBlobUnrecordedTestsSuite) TestUploadStreamToBlockBlobEmpty() { } // nolint -func performUploadAndDownloadFileTest(t *testing.T, _require *require.Assertions, testName string, fileSize, blockSize, parallelism, downloadOffset, downloadCount int) { +func performUploadAndDownloadFileTest(t *testing.T, _require *require.Assertions, testName string, fileSize, blockSize, concurrency, downloadOffset, downloadCount int) { // Set up file to upload fileName := "BigFile.bin" fileData := generateFile(fileName, fileSize) @@ -193,7 +193,7 @@ func performUploadAndDownloadFileTest(t *testing.T, _require *require.Assertions _, err = client.UploadFile(context.Background(), containerName, blobName, file, &blockblob.UploadFileOptions{ BlockSize: int64(blockSize), - Parallelism: uint16(parallelism), + Concurrency: uint16(concurrency), // If Progress is non-nil, this function is called periodically as bytes are uploaded. 
Progress: func(bytesTransferred int64) { _require.Equal(bytesTransferred > 0 && bytesTransferred <= int64(fileSize), true) @@ -226,7 +226,7 @@ func performUploadAndDownloadFileTest(t *testing.T, _require *require.Assertions Offset: int64(downloadOffset), }, BlockSize: int64(blockSize), - Parallelism: uint16(parallelism), + Concurrency: uint16(concurrency), // If Progress is non-nil, this function is called periodically as bytes are uploaded. Progress: func(bytesTransferred int64) { _require.Equal(bytesTransferred > 0 && bytesTransferred <= int64(fileSize), true) @@ -264,80 +264,80 @@ func performUploadAndDownloadFileTest(t *testing.T, _require *require.Assertions func (s *AZBlobUnrecordedTestsSuite) TestUploadAndDownloadFileInChunks() { fileSize := 8 * 1024 blockSize := 1024 - parallelism := 3 + concurrency := 3 _require := require.New(s.T()) testName := s.T().Name() - performUploadAndDownloadFileTest(s.T(), _require, testName, fileSize, blockSize, parallelism, 0, 0) + performUploadAndDownloadFileTest(s.T(), _require, testName, fileSize, blockSize, concurrency, 0, 0) } // nolint func (s *AZBlobUnrecordedTestsSuite) TestUploadAndDownloadFileSingleIO() { fileSize := 1024 blockSize := 2048 - parallelism := 3 + concurrency := 3 _require := require.New(s.T()) testName := s.T().Name() - performUploadAndDownloadFileTest(s.T(), _require, testName, fileSize, blockSize, parallelism, 0, 0) + performUploadAndDownloadFileTest(s.T(), _require, testName, fileSize, blockSize, concurrency, 0, 0) } // nolint func (s *AZBlobUnrecordedTestsSuite) TestUploadAndDownloadFileSingleRoutine() { fileSize := 8 * 1024 blockSize := 1024 - parallelism := 1 + concurrency := 1 _require := require.New(s.T()) testName := s.T().Name() - performUploadAndDownloadFileTest(s.T(), _require, testName, fileSize, blockSize, parallelism, 0, 0) + performUploadAndDownloadFileTest(s.T(), _require, testName, fileSize, blockSize, concurrency, 0, 0) } // nolint func (s *AZBlobUnrecordedTestsSuite) 
TestUploadAndDownloadFileEmpty() { fileSize := 0 blockSize := 1024 - parallelism := 3 + concurrency := 3 _require := require.New(s.T()) testName := s.T().Name() - performUploadAndDownloadFileTest(s.T(), _require, testName, fileSize, blockSize, parallelism, 0, 0) + performUploadAndDownloadFileTest(s.T(), _require, testName, fileSize, blockSize, concurrency, 0, 0) } // nolint func (s *AZBlobUnrecordedTestsSuite) TestUploadAndDownloadFileNonZeroOffset() { fileSize := 8 * 1024 blockSize := 1024 - parallelism := 3 + concurrency := 3 downloadOffset := 1000 downloadCount := 0 _require := require.New(s.T()) testName := s.T().Name() - performUploadAndDownloadFileTest(s.T(), _require, testName, fileSize, blockSize, parallelism, downloadOffset, downloadCount) + performUploadAndDownloadFileTest(s.T(), _require, testName, fileSize, blockSize, concurrency, downloadOffset, downloadCount) } // nolint func (s *AZBlobUnrecordedTestsSuite) TestUploadAndDownloadFileNonZeroCount() { fileSize := 8 * 1024 blockSize := 1024 - parallelism := 3 + concurrency := 3 downloadOffset := 0 downloadCount := 6000 _require := require.New(s.T()) testName := s.T().Name() - performUploadAndDownloadFileTest(s.T(), _require, testName, fileSize, blockSize, parallelism, downloadOffset, downloadCount) + performUploadAndDownloadFileTest(s.T(), _require, testName, fileSize, blockSize, concurrency, downloadOffset, downloadCount) } // nolint func (s *AZBlobUnrecordedTestsSuite) TestUploadAndDownloadFileNonZeroOffsetAndCount() { fileSize := 8 * 1024 blockSize := 1024 - parallelism := 3 + concurrency := 3 downloadOffset := 1000 downloadCount := 6000 _require := require.New(s.T()) testName := s.T().Name() - performUploadAndDownloadFileTest(s.T(), _require, testName, fileSize, blockSize, parallelism, downloadOffset, downloadCount) + performUploadAndDownloadFileTest(s.T(), _require, testName, fileSize, blockSize, concurrency, downloadOffset, downloadCount) } // nolint -func performUploadAndDownloadBufferTest(t 
*testing.T, _require *require.Assertions, testName string, blobSize, blockSize, parallelism, downloadOffset, downloadCount int) { +func performUploadAndDownloadBufferTest(t *testing.T, _require *require.Assertions, testName string, blobSize, blockSize, concurrency, downloadOffset, downloadCount int) { // Set up buffer to upload _, bytesToUpload := testcommon.GenerateData(blobSize) @@ -359,7 +359,7 @@ func performUploadAndDownloadBufferTest(t *testing.T, _require *require.Assertio _, err = client.UploadBuffer(context.Background(), containerName, blobName, bytesToUpload, &blockblob.UploadBufferOptions{ BlockSize: int64(blockSize), - Parallelism: uint16(parallelism), + Concurrency: uint16(concurrency), // If Progress is non-nil, this function is called periodically as bytes are uploaded. Progress: func(bytesTransferred int64) { _require.Equal(bytesTransferred > 0 && bytesTransferred <= int64(blobSize), true) @@ -386,7 +386,7 @@ func performUploadAndDownloadBufferTest(t *testing.T, _require *require.Assertio Offset: int64(downloadOffset), }, BlockSize: int64(blockSize), - Parallelism: uint16(parallelism), + Concurrency: uint16(concurrency), // If Progress is non-nil, this function is called periodically as bytes are uploaded. 
Progress: func(bytesTransferred int64) { _require.Equal(bytesTransferred > 0 && bytesTransferred <= int64(blobSize), true) @@ -410,76 +410,76 @@ func performUploadAndDownloadBufferTest(t *testing.T, _require *require.Assertio func (s *AZBlobUnrecordedTestsSuite) TestUploadAndDownloadBufferInChunks() { blobSize := 8 * 1024 blockSize := 1024 - parallelism := 3 + concurrency := 3 _require := require.New(s.T()) testName := s.T().Name() - performUploadAndDownloadBufferTest(s.T(), _require, testName, blobSize, blockSize, parallelism, 0, 0) + performUploadAndDownloadBufferTest(s.T(), _require, testName, blobSize, blockSize, concurrency, 0, 0) } // nolint func (s *AZBlobUnrecordedTestsSuite) TestUploadAndDownloadBufferSingleIO() { blobSize := 1024 blockSize := 8 * 1024 - parallelism := 3 + concurrency := 3 _require := require.New(s.T()) testName := s.T().Name() - performUploadAndDownloadBufferTest(s.T(), _require, testName, blobSize, blockSize, parallelism, 0, 0) + performUploadAndDownloadBufferTest(s.T(), _require, testName, blobSize, blockSize, concurrency, 0, 0) } // nolint func (s *AZBlobUnrecordedTestsSuite) TestUploadAndDownloadBufferSingleRoutine() { blobSize := 8 * 1024 blockSize := 1024 - parallelism := 1 + concurrency := 1 _require := require.New(s.T()) testName := s.T().Name() - performUploadAndDownloadBufferTest(s.T(), _require, testName, blobSize, blockSize, parallelism, 0, 0) + performUploadAndDownloadBufferTest(s.T(), _require, testName, blobSize, blockSize, concurrency, 0, 0) } // nolint func (s *AZBlobUnrecordedTestsSuite) TestUploadAndDownloadBufferEmpty() { blobSize := 0 blockSize := 1024 - parallelism := 3 + concurrency := 3 _require := require.New(s.T()) testName := s.T().Name() - performUploadAndDownloadBufferTest(s.T(), _require, testName, blobSize, blockSize, parallelism, 0, 0) + performUploadAndDownloadBufferTest(s.T(), _require, testName, blobSize, blockSize, concurrency, 0, 0) } // nolint func (s *AZBlobUnrecordedTestsSuite) 
TestDownloadBufferWithNonZeroOffset() { blobSize := 8 * 1024 blockSize := 1024 - parallelism := 3 + concurrency := 3 downloadOffset := 1000 downloadCount := 0 _require := require.New(s.T()) testName := s.T().Name() - performUploadAndDownloadBufferTest(s.T(), _require, testName, blobSize, blockSize, parallelism, downloadOffset, downloadCount) + performUploadAndDownloadBufferTest(s.T(), _require, testName, blobSize, blockSize, concurrency, downloadOffset, downloadCount) } // nolint func (s *AZBlobUnrecordedTestsSuite) TestDownloadBufferWithNonZeroCount() { blobSize := 8 * 1024 blockSize := 1024 - parallelism := 3 + concurrency := 3 downloadOffset := 0 downloadCount := 6000 _require := require.New(s.T()) testName := s.T().Name() - performUploadAndDownloadBufferTest(s.T(), _require, testName, blobSize, blockSize, parallelism, downloadOffset, downloadCount) + performUploadAndDownloadBufferTest(s.T(), _require, testName, blobSize, blockSize, concurrency, downloadOffset, downloadCount) } // nolint func (s *AZBlobUnrecordedTestsSuite) TestDownloadBufferWithNonZeroOffsetAndCount() { blobSize := 8 * 1024 blockSize := 1024 - parallelism := 3 + concurrency := 3 downloadOffset := 2000 downloadCount := 6 * 1024 _require := require.New(s.T()) testName := s.T().Name() - performUploadAndDownloadBufferTest(s.T(), _require, testName, blobSize, blockSize, parallelism, downloadOffset, downloadCount) + performUploadAndDownloadBufferTest(s.T(), _require, testName, blobSize, blockSize, concurrency, downloadOffset, downloadCount) } // nolint @@ -489,18 +489,18 @@ func (s *AZBlobUnrecordedTestsSuite) TestBasicDoBatchTransfer() { type testInstance struct { transferSize int64 chunkSize int64 - parallelism uint16 + concurrency uint16 expectError bool } testMatrix := []testInstance{ - {transferSize: 100, chunkSize: 10, parallelism: 5, expectError: false}, - {transferSize: 100, chunkSize: 9, parallelism: 4, expectError: false}, - {transferSize: 100, chunkSize: 8, parallelism: 15, expectError: 
false}, - {transferSize: 100, chunkSize: 1, parallelism: 3, expectError: false}, - {transferSize: 0, chunkSize: 100, parallelism: 5, expectError: false}, // empty file works - {transferSize: 100, chunkSize: 0, parallelism: 5, expectError: true}, // 0 chunk size on the other hand must fail - {transferSize: 0, chunkSize: 0, parallelism: 5, expectError: true}, + {transferSize: 100, chunkSize: 10, concurrency: 5, expectError: false}, + {transferSize: 100, chunkSize: 9, concurrency: 4, expectError: false}, + {transferSize: 100, chunkSize: 8, concurrency: 15, expectError: false}, + {transferSize: 100, chunkSize: 1, concurrency: 3, expectError: false}, + {transferSize: 0, chunkSize: 100, concurrency: 5, expectError: false}, // empty file works + {transferSize: 100, chunkSize: 0, concurrency: 5, expectError: true}, // 0 chunk size on the other hand must fail + {transferSize: 0, chunkSize: 0, concurrency: 5, expectError: true}, } for _, test := range testMatrix { @@ -512,7 +512,7 @@ func (s *AZBlobUnrecordedTestsSuite) TestBasicDoBatchTransfer() { err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{ TransferSize: test.transferSize, ChunkSize: test.chunkSize, - Parallelism: test.parallelism, + Concurrency: test.concurrency, Operation: func(offset int64, chunkSize int64, ctx context.Context) error { atomic.AddInt64(&totalSizeCount, chunkSize) atomic.AddInt64(&runCount, 1) @@ -557,7 +557,7 @@ func (s *AZBlobUnrecordedTestsSuite) TestDoBatchTransferWithError() { err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{ TransferSize: 5, ChunkSize: 1, - Parallelism: 5, + Concurrency: 5, Operation: func(offset int64, chunkSize int64, ctx context.Context) error { // simulate doing some work (HTTP call in real scenarios) // later chunks later longer to finish diff --git a/sdk/storage/azblob/examples_test.go b/sdk/storage/azblob/examples_test.go index 1aa8514f27ce..58bb19c1a7a6 100644 --- a/sdk/storage/azblob/examples_test.go +++ 
b/sdk/storage/azblob/examples_test.go @@ -93,7 +93,7 @@ func Example() { // List methods returns a pager object which can be used to iterate over the results of a paging operation. // To iterate over a page use the NextPage(context.Context) to fetch the next page of results. // PageResponse() can be used to iterate over the results of the specific page. - pager := client.NewListBlobsPager(containerName, nil) + pager := client.NewListBlobsFlatPager(containerName, nil) for pager.More() { resp, err := pager.NextPage(context.TODO()) handleError(err) @@ -249,7 +249,7 @@ func Example_client_UploadFile() { _, err = client.UploadFile(context.TODO(), "testcontainer", "virtual/dir/path/"+fileName, fileHandler, &azblob.UploadFileOptions{ BlockSize: int64(1024), - Parallelism: uint16(3), + Concurrency: uint16(3), // If Progress is non-nil, this function is called periodically as bytes are uploaded. Progress: func(bytesTransferred int64) { fmt.Println(bytesTransferred) @@ -307,7 +307,7 @@ func Example_client_NewListBlobsPager() { client, err := azblob.NewClient(serviceURL, cred, nil) handleError(err) - pager := client.NewListBlobsPager("testcontainer", &azblob.ListBlobsOptions{ + pager := client.NewListBlobsFlatPager("testcontainer", &azblob.ListBlobsFlatOptions{ Include: container.ListBlobsInclude{Deleted: true, Versions: true}, }) diff --git a/sdk/storage/azblob/internal/shared/batch_transfer.go b/sdk/storage/azblob/internal/shared/batch_transfer.go index 82265964c8cc..a86fc582c52f 100644 --- a/sdk/storage/azblob/internal/shared/batch_transfer.go +++ b/sdk/storage/azblob/internal/shared/batch_transfer.go @@ -15,7 +15,7 @@ import ( type BatchTransferOptions struct { TransferSize int64 ChunkSize int64 - Parallelism uint16 + Concurrency uint16 Operation func(offset int64, chunkSize int64, ctx context.Context) error OperationName string } @@ -27,19 +27,19 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { return errors.New("ChunkSize cannot be 0") } - 
if o.Parallelism == 0 { - o.Parallelism = 5 // default Parallelism + if o.Concurrency == 0 { + o.Concurrency = 5 // default concurrency } // Prepare and do parallel operations. numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1) - operationChannel := make(chan func() error, o.Parallelism) // Create the channel that release 'Parallelism' goroutines concurrently + operationChannel := make(chan func() error, o.Concurrency) // Create the channel that release 'concurrency' goroutines concurrently operationResponseChannel := make(chan error, numChunks) // Holds each response ctx, cancel := context.WithCancel(ctx) defer cancel() // Create the goroutines that process each operation (in parallel). - for g := uint16(0); g < o.Parallelism; g++ { + for g := uint16(0); g < o.Concurrency; g++ { //grIndex := g go func() { for f := range operationChannel { diff --git a/sdk/storage/azblob/models.go b/sdk/storage/azblob/models.go index c797e1e15142..099f48d17217 100644 --- a/sdk/storage/azblob/models.go +++ b/sdk/storage/azblob/models.go @@ -27,8 +27,8 @@ type DeleteBlobOptions = blob.DeleteOptions // DownloadStreamOptions contains the optional parameters for the Client.DownloadStream method. type DownloadStreamOptions = blob.DownloadStreamOptions -// ListBlobsOptions contains the optional parameters for the container.Client.ListBlobFlatSegment method. -type ListBlobsOptions = container.ListBlobsFlatOptions +// ListBlobsFlatOptions contains the optional parameters for the container.Client.ListBlobFlatSegment method. +type ListBlobsFlatOptions = container.ListBlobsFlatOptions // ListBlobsInclude indicates what additional information the service should return with each blob. 
type ListBlobsInclude = container.ListBlobsInclude diff --git a/sdk/storage/azblob/responses.go b/sdk/storage/azblob/responses.go index 6bb153b8252a..86b05d098f43 100644 --- a/sdk/storage/azblob/responses.go +++ b/sdk/storage/azblob/responses.go @@ -29,8 +29,8 @@ type UploadResponse = blockblob.CommitBlockListResponse // DownloadStreamResponse wraps AutoRest generated BlobDownloadResponse and helps to provide info for retry. type DownloadStreamResponse = blob.DownloadStreamResponse -// ListBlobsResponse contains the response from method container.Client.ListBlobFlatSegment. -type ListBlobsResponse = container.ListBlobsFlatResponse +// ListBlobsFlatResponse contains the response from method container.Client.ListBlobFlatSegment. +type ListBlobsFlatResponse = container.ListBlobsFlatResponse // ListContainersResponse contains the response from method service.Client.ListContainersSegment. type ListContainersResponse = service.ListContainersResponse