diff --git a/azfile/bytes_writer.go b/azfile/bytes_writer.go new file mode 100644 index 0000000..a53cc08 --- /dev/null +++ b/azfile/bytes_writer.go @@ -0,0 +1,24 @@ +package azfile + +import ( + "errors" +) + +type bytesWriter []byte + +func NewBytesWriter(b []byte) bytesWriter { + return b +} + +func (c bytesWriter) WriteAt(b []byte, off int64) (int, error) { + if off >= int64(len(c)) || off < 0 { + return 0, errors.New("Offset value is out of range") + } + + n := copy(c[int(off):], b) + if n < len(b) { + return n, errors.New("not enough space for all bytes") + } + + return n, nil +} diff --git a/azfile/highlevel.go b/azfile/highlevel.go index 5fb795e..ec45760 100644 --- a/azfile/highlevel.go +++ b/azfile/highlevel.go @@ -6,7 +6,6 @@ import ( "fmt" "io" - "bytes" "os" "sync" @@ -21,7 +20,7 @@ const ( fileSegmentSize = 500 * 1024 * 1024 ) -// UploadToAzureFileOptions identifies options used by the UploadBufferToAzureFile and UploadFileToAzureFile functions. +// UploadToAzureFileOptions identifies options used by the UploadReaderAtToAzureFile and UploadFileToAzureFile functions. type UploadToAzureFileOptions struct { // RangeSize specifies the range size to use in each parallel upload; the default (and maximum size) is FileMaxUploadRangeBytes. RangeSize int64 @@ -39,11 +38,10 @@ type UploadToAzureFileOptions struct { Metadata Metadata } -// UploadBufferToAzureFile uploads a buffer to an Azure file. +// UploadReaderAtToAzureFile uploads a buffer to an Azure file. // Note: o.RangeSize must be >= 0 and <= FileMaxUploadRangeBytes, and if not specified, method will use FileMaxUploadRangeBytes by default. // The total size to be uploaded should be <= FileMaxSizeInBytes. -func UploadBufferToAzureFile(ctx context.Context, b []byte, - fileURL FileURL, o UploadToAzureFileOptions) error { +func UploadReaderAtToAzureFile(ctx context.Context, reader io.ReaderAt, readerSize int64, fileURL FileURL, o UploadToAzureFileOptions) error { // 1. Validate parameters, and set defaults. 
if o.RangeSize < 0 || o.RangeSize > FileMaxUploadRangeBytes { @@ -53,20 +51,18 @@ func UploadBufferToAzureFile(ctx context.Context, b []byte, o.RangeSize = FileMaxUploadRangeBytes } - size := int64(len(b)) - parallelism := o.Parallelism if parallelism == 0 { parallelism = defaultParallelCount // default parallelism } // 2. Try to create the Azure file. - _, err := fileURL.Create(ctx, size, o.FileHTTPHeaders, o.Metadata) + _, err := fileURL.Create(ctx, readerSize, o.FileHTTPHeaders, o.Metadata) if err != nil { return err } // If size equals to 0, upload nothing and directly return. - if size == 0 { + if readerSize == 0 { return nil } @@ -75,12 +71,12 @@ func UploadBufferToAzureFile(ctx context.Context, b []byte, progressLock := &sync.Mutex{} return doBatchTransfer(ctx, batchTransferOptions{ - transferSize: size, + transferSize: readerSize, chunkSize: o.RangeSize, parallelism: parallelism, operation: func(offset int64, curRangeSize int64) error { // Prepare to read the proper section of the buffer. 
- var body io.ReadSeeker = bytes.NewReader(b[offset : offset+curRangeSize]) + var body io.ReadSeeker = io.NewSectionReader(reader, offset, curRangeSize) if o.Progress != nil { rangeProgress := int64(0) body = pipeline.NewRequestBodyProgress(body, @@ -97,7 +93,7 @@ func UploadBufferToAzureFile(ctx context.Context, b []byte, _, err := fileURL.UploadRange(ctx, int64(offset), body, nil) return err }, - operationName: "UploadBufferToAzureFile", + operationName: "UploadReaderAtToAzureFile", }) } @@ -109,15 +105,7 @@ func UploadFileToAzureFile(ctx context.Context, file *os.File, if err != nil { return err } - m := mmf{} // Default to an empty slice; used for 0-size file - if stat.Size() != 0 { - m, err = newMMF(file, false, 0, int(stat.Size())) - if err != nil { - return err - } - defer m.unmap() - } - return UploadBufferToAzureFile(ctx, m, fileURL, o) + return UploadReaderAtToAzureFile(ctx, file, stat.Size(), fileURL, o) } // DownloadFromAzureFileOptions identifies options used by the DownloadAzureFileToBuffer and DownloadAzureFileToFile functions. @@ -247,17 +235,12 @@ func DownloadAzureFileToFile(ctx context.Context, fileURL FileURL, file *os.File } } - // 4. Set mmap and call DownloadAzureFileToBuffer, in this case file size should be > 0. - m := mmf{} // Default to an empty slice; used for 0-size file - if azfileSize > 0 { - m, err = newMMF(file, true, 0, int(azfileSize)) - if err != nil { - return nil, err - } - defer m.unmap() + b := make([]byte, azfileSize) + _, err = file.Read(b) + if err != nil { + return nil, err } - - return downloadAzureFileToBuffer(ctx, fileURL, azfileProperties, m, o) + return downloadAzureFileToBuffer(ctx, fileURL, azfileProperties, b, o) } // BatchTransferOptions identifies options used by doBatchTransfer. 
diff --git a/azfile/section_writer.go b/azfile/section_writer.go new file mode 100644 index 0000000..566ec50 --- /dev/null +++ b/azfile/section_writer.go @@ -0,0 +1,47 @@ +package azfile + +import ( + "errors" + "io" +) + +type SectionWriter struct { + Count int64 + Offset int64 + Position int64 + WriterAt io.WriterAt +} + +func NewSectionWriter(c io.WriterAt, off int64, count int64) *SectionWriter { + return &SectionWriter{ + Count: count, + Offset: off, + WriterAt: c, + } +} + +func (c *SectionWriter) Write(p []byte) (int, error) { + remaining := c.Count - c.Position + + if remaining <= 0 { + return 0, errors.New("end of section reached") + } + + slice := p + + if int64(len(slice)) > remaining { + slice = slice[:remaining] + } + + n, err := c.WriterAt.WriteAt(slice, c.Offset+c.Position) + c.Position += int64(n) + if err != nil { + return n, err + } + + if len(p) > n { + return n, errors.New("not enough space for all bytes") + } + + return n, nil +} diff --git a/azfile/url_file.go b/azfile/url_file.go index 74820f0..64f4150 100644 --- a/azfile/url_file.go +++ b/azfile/url_file.go @@ -107,11 +107,11 @@ func (f FileURL) AbortCopy(ctx context.Context, copyID string) (*FileAbortCopyRe return f.fileClient.AbortCopy(ctx, copyID, nil, nil) } -// Download downloads count bytes of data from the start offset. +// Download downloads Count bytes of data from the start Offset. // The response includes all of the file’s properties. However, passing true for rangeGetContentMD5 returns the range’s MD5 in the ContentMD5 // response header/property if the range is <= 4MB; the HTTP request fails with 400 (Bad Request) if the requested range is greater than 4MB. -// Note: offset must be >=0, count must be >= 0. -// If count is CountToEnd (0), then data is read from specified offset to the end. +// Note: Offset must be >=0, Count must be >= 0. +// If Count is CountToEnd (0), then data is read from specified Offset to the end. 
// rangeGetContentMD5 only works with partial data downloading. // For more information, see https://docs.microsoft.com/rest/api/storageservices/get-file. func (f FileURL) Download(ctx context.Context, offset int64, count int64, rangeGetContentMD5 bool) (*RetryableDownloadResponse, error) { @@ -197,7 +197,7 @@ func (f FileURL) Resize(ctx context.Context, length int64) (*FileSetHTTPHeadersR } // UploadRange writes bytes to a file. -// offset indicates the offset at which to begin writing, in bytes. +// Offset indicates the Offset at which to begin writing, in bytes. // For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-range. func (f FileURL) UploadRange(ctx context.Context, offset int64, body io.ReadSeeker, transactionalMD5 []byte) (*FileUploadRangeResponse, error) { if body == nil { @@ -214,7 +214,7 @@ func (f FileURL) UploadRange(ctx context.Context, offset int64, body io.ReadSeek } // Update range with bytes from a specific URL. -// offset indicates the offset at which to begin writing, in bytes. +// Offset indicates the Offset at which to begin writing, in bytes. func (f FileURL) UploadRangeFromURL(ctx context.Context, sourceURL url.URL, sourceOffset int64, destOffset int64, count int64) (*FileUploadRangeFromURLResponse, error) { @@ -223,21 +223,21 @@ func (f FileURL) UploadRangeFromURL(ctx context.Context, sourceURL url.URL, sour } // ClearRange clears the specified range and releases the space used in storage for that range. -// offset means the start offset of the range to clear. -// count means count of bytes to clean, it cannot be CountToEnd (0), and must be explicitly specified. +// Offset means the start Offset of the range to clear. +// Count means Count of bytes to clean, it cannot be CountToEnd (0), and must be explicitly specified. 
// If the range specified is not 512-byte aligned, the operation will write zeros to // the start or end of the range that is not 512-byte aligned and free the rest of the range inside that is 512-byte aligned. // For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-range. func (f FileURL) ClearRange(ctx context.Context, offset int64, count int64) (*FileUploadRangeResponse, error) { if count <= 0 { - return nil, errors.New("invalid argument, count cannot be CountToEnd, and must be > 0") + return nil, errors.New("invalid argument, Count cannot be CountToEnd, and must be > 0") } return f.fileClient.UploadRange(ctx, *toRange(offset, count), FileRangeWriteClear, 0, nil, nil, nil, nil) } // GetRangeList returns the list of valid ranges for a file. -// Use a count with value CountToEnd (0) to indicate the left part of file start from offset. +// Use a Count with value CountToEnd (0) to indicate the left part of file start from Offset. // For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/list-ranges. 
func (f FileURL) GetRangeList(ctx context.Context, offset int64, count int64) (*ShareFileRangeList, error) { return f.fileClient.GetRangeList(ctx, diff --git a/azfile/zc_mmf_unix.go b/azfile/zc_mmf_unix.go deleted file mode 100644 index 54f2f9f..0000000 --- a/azfile/zc_mmf_unix.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build linux darwin freebsd - -package azfile - -import ( - "os" - - "golang.org/x/sys/unix" -) - -type mmf []byte - -func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) { - prot, flags := unix.PROT_READ, unix.MAP_SHARED // Assume read-only - if writable { - prot, flags = unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED - } - addr, err := unix.Mmap(int(file.Fd()), offset, length, prot, flags) - return mmf(addr), err -} - -func (m *mmf) unmap() { - err := unix.Munmap(*m) - *m = nil - if err != nil { - sanityCheckFailed(err.Error()) - } -} diff --git a/azfile/zc_mmf_windows.go b/azfile/zc_mmf_windows.go deleted file mode 100644 index 0050479..0000000 --- a/azfile/zc_mmf_windows.go +++ /dev/null @@ -1,40 +0,0 @@ -package azfile - -import ( - "os" - "reflect" - "unsafe" - - "golang.org/x/sys/windows" -) - -type mmf []byte - -func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) { - prot, access := uint32(windows.PAGE_READONLY), uint32(windows.FILE_MAP_READ) // Assume read-only - if writable { - prot, access = uint32(windows.PAGE_READWRITE), uint32(windows.FILE_MAP_WRITE) - } - maxSize := int64(offset + int64(length)) - hMMF, errno := windows.CreateFileMapping(windows.Handle(file.Fd()), nil, prot, uint32(maxSize>>32), uint32(maxSize&0xffffffff), nil) - if hMMF == 0 { - return nil, os.NewSyscallError("CreateFileMapping", errno) - } - defer windows.CloseHandle(hMMF) - addr, errno := windows.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length)) - m := mmf{} - h := (*reflect.SliceHeader)(unsafe.Pointer(&m)) - h.Data = addr - h.Len = length - h.Cap = h.Len - return m, nil 
-} - -func (m *mmf) unmap() { - addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0]))) - *m = mmf{} - err := windows.UnmapViewOfFile(addr) - if err != nil { - sanityCheckFailed(err.Error()) - } -} diff --git a/azfile/zc_policy_retry.go b/azfile/zc_policy_retry.go index 8ca34dc..a3db889 100644 --- a/azfile/zc_policy_retry.go +++ b/azfile/zc_policy_retry.go @@ -137,7 +137,7 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory { // Even tries go against primary; odd tries go against the secondary // For a primary wait ((2 ^ primaryTries - 1) * delay * random(0.8, 1.2) // If secondary gets a 404, don't fail, retry but future retries are only against the primary - // When retrying against a secondary, ignore the retry count and wait (.1 second * random(0.8, 1.2)) + // When retrying against a secondary, ignore the retry Count and wait (.1 second * random(0.8, 1.2)) for try := int32(1); try <= o.MaxTries; try++ { logf("\n=====> Try=%d\n", try) @@ -160,7 +160,7 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory { requestCopy := request.Copy() // For each try, seek to the beginning of the Body stream. We do this even for the 1st try because - // the stream may not be at offset 0 when we first get it and we want the same behavior for the + // the stream may not be at Offset 0 when we first get it and we want the same behavior for the // 1st try as for additional tries. 
if err = requestCopy.RewindBody(); err != nil { sanityCheckFailed(err.Error()) @@ -367,10 +367,10 @@ func (r *deadlineExceededReadCloser) Read(p []byte) (int, error) { } return n, improveDeadlineExceeded(err) } -func (r *deadlineExceededReadCloser) Seek(offset int64, whence int) (int64, error) { +func (r *deadlineExceededReadCloser) Seek(Offset int64, whence int) (int64, error) { // For an HTTP request, the ReadCloser MUST also implement seek // For an HTTP response, Seek MUST not be called (or this will panic) - o, err := r.r.(io.Seeker).Seek(offset, whence) + o, err := r.r.(io.Seeker).Seek(Offset, whence) return o, improveDeadlineExceeded(err) } func (r *deadlineExceededReadCloser) Close() error { diff --git a/azfile/zc_retry_reader.go b/azfile/zc_retry_reader.go index 2d962d8..e9ab634 100644 --- a/azfile/zc_retry_reader.go +++ b/azfile/zc_retry_reader.go @@ -15,12 +15,12 @@ type HTTPGetter func(ctx context.Context, i HTTPGetterInfo) (*http.Response, err // HTTPGetterInfo is passed to an HTTPGetter function passing it parameters // that should be used to make an HTTP GET request. type HTTPGetterInfo struct { - // Offset specifies the start offset that should be used when + // Offset specifies the start Offset that should be used when // creating the HTTP GET request's Range header Offset int64 - // Count specifies the count of bytes that should be used to calculate - // the end offset when creating the HTTP GET request's Range header + // Count specifies the Count of bytes that should be used to calculate + // the end Offset when creating the HTTP GET request's Range header Count int64 // ETag specifies the resource's etag that should be used when creating @@ -105,7 +105,7 @@ func (s *retryReader) Read(p []byte) (n int, err error) { for try := 0; ; try++ { //fmt.Println(try) // Comment out for debugging. 
if s.countWasBounded && s.info.Count == CountToEnd { - // User specified an original count and the remaining bytes are 0, return 0, EOF + // User specified an original Count and the remaining bytes are 0, return 0, EOF return 0, io.EOF } @@ -134,16 +134,16 @@ func (s *retryReader) Read(p []byte) (n int, err error) { // We successfully read data or end EOF. if err == nil || err == io.EOF { - s.info.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future + s.info.Offset += int64(n) // Increments the start Offset in case we need to make a new HTTP request in the future if s.info.Count != CountToEnd { - s.info.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future + s.info.Count -= int64(n) // Decrement the Count in case we need to make a new HTTP request in the future } return n, err // Return the return to the caller } s.Close() // Error, close stream s.setResponse(nil) // Our stream is no longer good - // Check the retry count and error code, and decide whether to retry. + // Check the retry Count and error code, and decide whether to retry. retriesExhausted := try >= s.o.MaxRetryRequests _, isNetError := err.(net.Error) isUnexpectedEOF := err == io.ErrUnexpectedEOF diff --git a/azfile/zc_util_validate.go b/azfile/zc_util_validate.go index 37f1be3..b5921c8 100644 --- a/azfile/zc_util_validate.go +++ b/azfile/zc_util_validate.go @@ -7,13 +7,13 @@ import ( ) const ( - // CountToEnd indiciates a flag for count parameter. It means the count of bytes - // from start offset to the end of file. + // CountToEnd indiciates a flag for Count parameter. It means the Count of bytes + // from start Offset to the end of file. CountToEnd = 0 ) // httpRange defines a range of bytes within an HTTP resource, starting at offset and -// ending at offset+count-1 inclusively. +// ending at offset+Count-1 inclusively. 
// An httpRange which has a zero-value offset, and a count with value CountToEnd indicates the entire resource. // An httpRange which has a non zero-value offset but a count with value CountToEnd indicates from the offset to the resource's end. type httpRange struct { @@ -30,7 +30,7 @@ func (r httpRange) pointers() *string { } // toRange makes range string adhere to REST API. -// A count with value CountToEnd means count of bytes from offset to the end of file. +// A Count with value CountToEnd means Count of bytes from Offset to the end of file. // For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-the-range-header-for-file-service-operations. func toRange(offset int64, count int64) *string { // No additional validation by design. API can validate parameter by case, and use this method. @@ -63,6 +63,6 @@ if err != nil { sanityCheckFailed(err.Error()) } - sanityCheckFailed("stream must be set to position 0") + sanityCheckFailed("stream must be set to Position 0") } } diff --git a/azfile/zt_byte_writer_test.go b/azfile/zt_byte_writer_test.go new file mode 100644 index 0000000..e75d752 --- /dev/null +++ b/azfile/zt_byte_writer_test.go @@ -0,0 +1,30 @@ +package azfile_test + +import ( + "bytes" + "github.com/Azure/azure-storage-file-go/azfile" + chk "gopkg.in/check.v1" +) + +func (s *FileURLSuite) TestBytesWriterWriteAt(c *chk.C) { + b := make([]byte, 10) + buffer := azfile.NewBytesWriter(b) + + count, err := buffer.WriteAt([]byte{1, 2}, 10) + c.Assert(err, chk.ErrorMatches, "Offset value is out of range") + c.Assert(count, chk.Equals, 0) + + count, err = buffer.WriteAt([]byte{1, 2}, -1) + c.Assert(err, chk.ErrorMatches, "Offset value is out of range") + c.Assert(count, chk.Equals, 0) + + count, err = buffer.WriteAt([]byte{1, 2}, 9) + c.Assert(err, chk.ErrorMatches, "not enough space for all bytes") + c.Assert(count, chk.Equals, 1) + c.Assert(bytes.Compare(b, []byte{0, 0, 
0, 0, 0, 0, 0, 0, 0, 1}), chk.Equals, 0) + + count, err = buffer.WriteAt([]byte{1, 2}, 8) + c.Assert(err, chk.IsNil) + c.Assert(count, chk.Equals, 2) + c.Assert(bytes.Compare(b, []byte{0, 0, 0, 0, 0, 0, 0, 0, 1, 2}), chk.Equals, 0) +} diff --git a/azfile/zt_examples_test.go b/azfile/zt_examples_test.go index cfcdd47..aea5d16 100644 --- a/azfile/zt_examples_test.go +++ b/azfile/zt_examples_test.go @@ -84,7 +84,7 @@ func Example() { } // Download the file's contents and verify that it worked correctly. - // User can specify 0 as offset and azfile.CountToEnd(-1) as count to indiciate downloading the entire file. + // User can specify 0 as Offset and azfile.CountToEnd(-1) as Count to indiciate downloading the entire file. get, err := fileURL.Download(ctx, 0, azfile.CountToEnd, false) if err != nil { log.Fatal(err) @@ -661,7 +661,7 @@ func ExampleFileURL() { } // Let's get all the data saved in the file, and verify if data is correct. - // User can specify 0 as offset and azfile.CountToEnd(-1) as count to indiciate downloading the entire file. + // User can specify 0 as Offset and azfile.CountToEnd(-1) as Count to indiciate downloading the entire file. get, err := fileURL.Download(ctx, 0, azfile.CountToEnd, false) if err != nil { log.Fatal(err) @@ -899,7 +899,7 @@ func ExampleFileURL_Download() { fileURL := azfile.NewFileURL(*u, azfile.NewPipeline(credential, azfile.PipelineOptions{})) // Trigger download. - downloadResponse, err := fileURL.Download(context.Background(), 0, azfile.CountToEnd, false) // 0 offset and azfile.CountToEnd(-1) count means download entire file. + downloadResponse, err := fileURL.Download(context.Background(), 0, azfile.CountToEnd, false) // 0 Offset and azfile.CountToEnd(-1) Count means download entire file. 
if err != nil { log.Fatal(err) } @@ -996,7 +996,7 @@ func ExampleDownloadAzureFileToFile() { } defer file.Close() - // Trigger parallel download with Parallelism set to 3, MaxRetryRequestsPerRange means the count of retry requests + // Trigger parallel download with Parallelism set to 3, MaxRetryRequestsPerRange means the Count of retry requests // could be sent if there is error during reading stream. downloadResponse, err := azfile.DownloadAzureFileToFile(context.Background(), fileURL, file, azfile.DownloadFromAzureFileOptions{ diff --git a/azfile/zt_section_writer_test.go b/azfile/zt_section_writer_test.go new file mode 100644 index 0000000..5d4fe74 --- /dev/null +++ b/azfile/zt_section_writer_test.go @@ -0,0 +1,91 @@ +package azfile_test + +import ( + "bytes" + "github.com/Azure/azure-storage-file-go/azfile" + chk "gopkg.in/check.v1" + "io" +) + +func (s *FileURLSuite) TestSectionWriter(c *chk.C) { + b := [10]byte{} + buffer := azfile.NewBytesWriter(b[:]) + + section := azfile.NewSectionWriter(buffer, 0, 5) + c.Assert(section.Count, chk.Equals, int64(5)) + c.Assert(section.Offset, chk.Equals, int64(0)) + c.Assert(section.Position, chk.Equals, int64(0)) + + count, err := section.Write([]byte{1, 2, 3}) + c.Assert(err, chk.IsNil) + c.Assert(count, chk.Equals, 3) + c.Assert(section.Position, chk.Equals, int64(3)) + c.Assert(b, chk.Equals, [10]byte{1, 2, 3, 0, 0, 0, 0, 0, 0, 0}) + + count, err = section.Write([]byte{4, 5, 6}) + c.Assert(err, chk.ErrorMatches, "not enough space for all bytes") + c.Assert(count, chk.Equals, 2) + c.Assert(section.Position, chk.Equals, int64(5)) + c.Assert(b, chk.Equals, [10]byte{1, 2, 3, 4, 5, 0, 0, 0, 0, 0}) + + count, err = section.Write([]byte{6, 7, 8}) + c.Assert(err, chk.ErrorMatches, "end of section reached") + c.Assert(count, chk.Equals, 0) + c.Assert(section.Position, chk.Equals, int64(5)) + c.Assert(b, chk.Equals, [10]byte{1, 2, 3, 4, 5, 0, 0, 0, 0, 0}) + + // Intentionally create a section writer which will attempt to write + 
// outside the bounds of the buffer. + section = azfile.NewSectionWriter(buffer, 5, 6) + c.Assert(section.Count, chk.Equals, int64(6)) + c.Assert(section.Offset, chk.Equals, int64(5)) + c.Assert(section.Position, chk.Equals, int64(0)) + + count, err = section.Write([]byte{6, 7, 8}) + c.Assert(err, chk.IsNil) + c.Assert(count, chk.Equals, 3) + c.Assert(section.Position, chk.Equals, int64(3)) + c.Assert(b, chk.Equals, [10]byte{1, 2, 3, 4, 5, 6, 7, 8, 0, 0}) + + // Attempt to write past the end of the section. Since the underlying + // buffer rejects the write it gives the same error as in the normal case. + count, err = section.Write([]byte{9, 10, 11}) + c.Assert(err, chk.ErrorMatches, "not enough space for all bytes") + c.Assert(count, chk.Equals, 2) + c.Assert(section.Position, chk.Equals, int64(5)) + c.Assert(b, chk.Equals, [10]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) + + // Attempt to write past the end of the buffer. In this case the buffer + // rejects the write completely since it falls completely out of bounds. 
+ count, err = section.Write([]byte{11, 12, 13}) + c.Assert(err, chk.ErrorMatches, "Offset value is out of range") + c.Assert(count, chk.Equals, 0) + c.Assert(section.Position, chk.Equals, int64(5)) + c.Assert(b, chk.Equals, [10]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) +} + +func (s *aztestsSuite) TestSectionWriterCopySrcDestEmpty(c *chk.C) { + input := make([]byte, 0) + reader := bytes.NewReader(input) + + output := make([]byte, 0) + buffer := azfile.NewBytesWriter(output) + section := azfile.NewSectionWriter(buffer, 0, 0) + + count, err := io.Copy(section, reader) + c.Assert(err, chk.IsNil) + c.Assert(count, chk.Equals, int64(0)) +} + +func (s *aztestsSuite) TestSectionWriterCopyDestEmpty(c *chk.C) { + input := make([]byte, 10) + reader := bytes.NewReader(input) + + output := make([]byte, 0) + buffer := azfile.NewBytesWriter(output) + section := azfile.NewSectionWriter(buffer, 0, 0) + + count, err := io.Copy(section, reader) + c.Assert(err, chk.ErrorMatches, "end of section reached") + c.Assert(count, chk.Equals, int64(0)) +} diff --git a/azfile/zt_uploaddownload_test.go b/azfile/zt_uploaddownload_test.go index 8d542e8..03a804f 100644 --- a/azfile/zt_uploaddownload_test.go +++ b/azfile/zt_uploaddownload_test.go @@ -354,11 +354,11 @@ func (ud *uploadDownloadSuite) TestDownloadDefaultParam(c *chk.C) { c.Assert(resp.ContentLength(), chk.Equals, fileSize) retryReader := resp.Body(RetryReaderOptions{}) - bytes, err := ioutil.ReadAll(retryReader) + bytes1, err := ioutil.ReadAll(retryReader) zeroBytes := make([]byte, fileSize, fileSize) c.Assert(err, chk.IsNil) - c.Assert(int64(len(bytes)), chk.Equals, fileSize) - c.Assert(zeroBytes, chk.DeepEquals, bytes) + c.Assert(int64(len(bytes1)), chk.Equals, fileSize) + c.Assert(zeroBytes, chk.DeepEquals, bytes1) } func (ud *uploadDownloadSuite) TestDownloadNegativePanic(c *chk.C) { @@ -446,7 +446,9 @@ func (ud *uploadDownloadSuite) TestUploadDownloadBufferParallelNonDefault(c *chk "overwrite": "overwrite", } - err := 
UploadBufferToAzureFile(ctx, srcBytes, file, UploadToAzureFileOptions{FileHTTPHeaders: headers, Metadata: metadata}) + reader := bytes.NewReader(srcBytes) + readerSize := int64(len(srcBytes)) + err := UploadReaderAtToAzureFile(ctx, reader, readerSize, file, UploadToAzureFileOptions{FileHTTPHeaders: headers, Metadata: metadata}) c.Assert(err, chk.IsNil) destBytes := make([]byte, fileSize) @@ -464,7 +466,9 @@ func (ud *uploadDownloadSuite) TestUploadDownloadBufferParallelNonDefault(c *chk c.Assert(destBytes, chk.DeepEquals, srcBytes) // Test overwrite scenario - err = UploadBufferToAzureFile(ctx, srcBytes2, file, UploadToAzureFileOptions{FileHTTPHeaders: headers2, Metadata: metadata2}) + reader2 := bytes.NewReader(srcBytes2) + readerSize2 := int64(len(srcBytes2)) + err = UploadReaderAtToAzureFile(ctx, reader2, readerSize2, file, UploadToAzureFileOptions{FileHTTPHeaders: headers2, Metadata: metadata2}) c.Assert(err, chk.IsNil) destBytes2 := make([]byte, fileSize2) @@ -482,7 +486,7 @@ func (ud *uploadDownloadSuite) TestUploadDownloadBufferParallelNonDefault(c *chk c.Assert(destBytes2, chk.DeepEquals, srcBytes2) } -// Customzied range size, parallel count and progress update. +// Customzied range size, parallel Count and progress update. 
func (ud *uploadDownloadSuite) TestUploadDownloadBufferParallelCheckProgress(c *chk.C) { fsu := getFSU() share, _ := createNewShare(c, fsu) @@ -500,8 +504,10 @@ func (ud *uploadDownloadSuite) TestUploadDownloadBufferParallelCheckProgress(c * uLogBuffer := bytes.Buffer{} dLogBuffer := bytes.Buffer{} - err := UploadBufferToAzureFile( - ctx, srcBytes, file, + reader := bytes.NewReader(srcBytes) + readerSize := int64(len(srcBytes)) + err := UploadReaderAtToAzureFile( + ctx, reader, readerSize, file, UploadToAzureFileOptions{ RangeSize: int64(blockSize), Parallelism: 3, @@ -564,7 +570,7 @@ func testUploadDownloadFileParallelDefault(c *chk.C, fileSize int64) { fileURL, _ := getFileURLFromShare(c, share) - file, bytes := createNewLocalFile(c, fileSize) + file, bytes1 := createNewLocalFile(c, fileSize) defer func() { file.Close() os.Remove(file.Name()) @@ -596,7 +602,7 @@ func testUploadDownloadFileParallelDefault(c *chk.C, fileSize int64) { destBytes, err := ioutil.ReadFile(file2Name) c.Assert(err, chk.IsNil) - c.Assert(bytes, chk.DeepEquals, destBytes) + c.Assert(bytes1, chk.DeepEquals, destBytes) } func (ud *uploadDownloadSuite) TestUploadFileToAzureFileNegativeInvalidRangeSize(c *chk.C) { @@ -606,7 +612,9 @@ func (ud *uploadDownloadSuite) TestUploadFileToAzureFileNegativeInvalidRangeSize shareURL, _ := getShareURL(c, fsu) fileURL, _ := getFileURLFromShare(c, shareURL) - err := UploadBufferToAzureFile(ctx, srcBytes, fileURL, UploadToAzureFileOptions{RangeSize: -1}) + reader := bytes.NewReader(srcBytes) + readerSize := int64(len(srcBytes)) + err := UploadReaderAtToAzureFile(ctx, reader, readerSize, fileURL, UploadToAzureFileOptions{RangeSize: -1}) c.Assert(err, chk.NotNil) c.Assert(strings.Contains(err.Error(), "o.RangeSize must be >= 0"), chk.Equals, true) } @@ -618,7 +626,9 @@ func (ud *uploadDownloadSuite) TestUploadFileToAzureFileNegativeInvalidRangeSize shareURL, _ := getShareURL(c, fsu) fileURL, _ := getFileURLFromShare(c, shareURL) - err := 
UploadBufferToAzureFile(ctx, srcBytes, fileURL, UploadToAzureFileOptions{RangeSize: FileMaxUploadRangeBytes + 1}) + reader := bytes.NewReader(srcBytes) + readerSize := int64(len(srcBytes)) + err := UploadReaderAtToAzureFile(ctx, reader, readerSize, fileURL, UploadToAzureFileOptions{RangeSize: FileMaxUploadRangeBytes + 1}) c.Assert(err, chk.NotNil) c.Assert(strings.Contains(err.Error(), "o.RangeSize must be >= 0 and <= 4194304, in bytes"), chk.Equals, true) } @@ -690,7 +700,9 @@ func (ud *uploadDownloadSuite) TestDownloadFileParallelOverwriteLocalFile(c *chk "bar": "barvalue", } - err := UploadBufferToAzureFile(ctx, srcBytes, fileURL, UploadToAzureFileOptions{FileHTTPHeaders: headers, Metadata: metadata}) + reader := bytes.NewReader(srcBytes) + readerSize := int64(len(srcBytes)) + err := UploadReaderAtToAzureFile(ctx, reader, readerSize, fileURL, UploadToAzureFileOptions{FileHTTPHeaders: headers, Metadata: metadata}) c.Assert(err, chk.IsNil) resp, err := DownloadAzureFileToFile(ctx, fileURL, localFile, DownloadFromAzureFileOptions{}) diff --git a/azfile/zt_url_file_test.go b/azfile/zt_url_file_test.go index aa650ff..3f00d56 100644 --- a/azfile/zt_url_file_test.go +++ b/azfile/zt_url_file_test.go @@ -1058,7 +1058,7 @@ func (s *FileURLSuite) TestFileDownloadDataNonExistantFile(c *chk.C) { validateStorageError(c, err, azfile.ServiceCodeResourceNotFound) } -// Don't check offset by design. +// Don't check Offset by design. 
// func (s *FileURLSuite) TestFileDownloadDataNegativeOffset(c *chk.C) { // fsu := getFSU() // shareURL, _ := createNewShare(c, fsu) @@ -1067,7 +1067,7 @@ func (s *FileURLSuite) TestFileDownloadDataNonExistantFile(c *chk.C) { // _, err := fileURL.Download(ctx, -1, azfile.CountToEnd, false) // c.Assert(err, chk.NotNil) -// c.Assert(strings.Contains(err.Error(), "offset must be >= 0"), chk.Equals, true) +// c.Assert(strings.Contains(err.Error(), "Offset must be >= 0"), chk.Equals, true) // } func (s *FileURLSuite) TestFileDownloadDataOffsetOutOfRange(c *chk.C) { @@ -1080,7 +1080,7 @@ func (s *FileURLSuite) TestFileDownloadDataOffsetOutOfRange(c *chk.C) { validateStorageError(c, err, azfile.ServiceCodeInvalidRange) } -// Don't check count by design. +// Don't check Count by design. // func (s *FileURLSuite) TestFileDownloadDataInvalidCount(c *chk.C) { // fsu := getFSU() // shareURL, _ := createNewShare(c, fsu) @@ -1089,7 +1089,7 @@ func (s *FileURLSuite) TestFileDownloadDataOffsetOutOfRange(c *chk.C) { // _, err := fileURL.Download(ctx, 0, -100, false) // c.Assert(err, chk.NotNil) -// c.Assert(strings.Contains(err.Error(), "count must be >= 0"), chk.Equals, true) +// c.Assert(strings.Contains(err.Error(), "Count must be >= 0"), chk.Equals, true) // } func (s *FileURLSuite) TestFileDownloadDataEntireFile(c *chk.C) { @@ -1101,7 +1101,7 @@ func (s *FileURLSuite) TestFileDownloadDataEntireFile(c *chk.C) { resp, err := fileURL.Download(ctx, 0, azfile.CountToEnd, false) c.Assert(err, chk.IsNil) - // Specifying a count of 0 results in the value being ignored + // Specifying a Count of 0 results in the value being ignored data, err := ioutil.ReadAll(resp.Response().Body) c.Assert(err, chk.IsNil) c.Assert(string(data), chk.Equals, fileDefaultData) @@ -1135,7 +1135,7 @@ func (s *FileURLSuite) TestFileDownloadDataCountOutOfRange(c *chk.C) { c.Assert(string(data), chk.Equals, fileDefaultData) } -// Don't check offset by design. +// Don't check Offset by design. 
// func (s *FileURLSuite) TestFileUploadRangeNegativeInvalidOffset(c *chk.C) { // fsu := getFSU() // shareURL, _ := createNewShare(c, fsu) @@ -1144,7 +1144,7 @@ func (s *FileURLSuite) TestFileDownloadDataCountOutOfRange(c *chk.C) { // _, err := fileURL.UploadRange(ctx, -2, strings.NewReader(fileDefaultData), nil) // c.Assert(err, chk.NotNil) -// c.Assert(strings.Contains(err.Error(), "offset must be >= 0"), chk.Equals, true) +// c.Assert(strings.Contains(err.Error(), "Offset must be >= 0"), chk.Equals, true) // } func (s *FileURLSuite) TestFileUploadRangeNilBody(c *chk.C) { @@ -1239,7 +1239,7 @@ func (f *FileURLSuite) TestUploadRangeFromURL(c *chk.C) { shareURL, shareName := createNewShare(c, fsu) defer delShare(c, shareURL, azfile.DeleteSnapshotsOptionNone) - // create the source file and populate it with random data at a specific offset + // create the source file and populate it with random data at a specific Offset expectedDataSize := 2048 totalFileSize := 4096 srcOffset := 999 @@ -1407,7 +1407,7 @@ func (s *FileURLSuite) TestClearRangeNonDefault1Count(c *chk.C) { c.Assert(bytes, chk.DeepEquals, []byte{0}) } -// Don't check offset by design. +// Don't check Offset by design. 
// func (s *FileURLSuite) TestFileClearRangeNegativeInvalidOffset(c *chk.C) { // fsu := getFSU() // shareURL, _ := getShareURL(c, fsu) @@ -1415,7 +1415,7 @@ func (s *FileURLSuite) TestClearRangeNonDefault1Count(c *chk.C) { // _, err := fileURL.ClearRange(ctx, -1, 1) // c.Assert(err, chk.NotNil) -// c.Assert(strings.Contains(err.Error(), "offset must be >= 0"), chk.Equals, true) +// c.Assert(strings.Contains(err.Error(), "Offset must be >= 0"), chk.Equals, true) // } func (s *FileURLSuite) TestFileClearRangeNegativeInvalidCount(c *chk.C) { @@ -1425,7 +1425,7 @@ func (s *FileURLSuite) TestFileClearRangeNegativeInvalidCount(c *chk.C) { _, err := fileURL.ClearRange(ctx, 0, 0) c.Assert(err, chk.NotNil) - c.Assert(strings.Contains(err.Error(), "count cannot be CountToEnd, and must be > 0"), chk.Equals, true) + c.Assert(strings.Contains(err.Error(), "Count cannot be CountToEnd, and must be > 0"), chk.Equals, true) } func setupGetRangeListTest(c *chk.C) (shareURL azfile.ShareURL, fileURL azfile.FileURL) {