From b372c758759c92f967ebf8c8166966a8bfbc6dac Mon Sep 17 00:00:00 2001 From: Anis Elleuch Date: Sat, 22 Apr 2017 22:45:01 +0100 Subject: [PATCH] mb: Update pkg to retry mb with unmatched region --- .../minio/minio-go/api-error-response.go | 46 +-- .../minio/minio-go/api-get-object.go | 9 +- .../minio/minio-go/api-get-policy.go | 30 +- .../minio/minio-go/api-put-bucket.go | 92 ++++-- .../minio/minio-go/api-put-object-file.go | 18 +- .../minio-go/api-put-object-multipart.go | 106 +++++++ .../minio/minio-go/api-put-object-progress.go | 107 +++++-- .../minio/minio-go/api-put-object-readat.go | 2 +- .../minio/minio-go/api-put-object.go | 1 - .../github.com/minio/minio-go/api-remove.go | 1 - .../minio/minio-go/api-s3-datatypes.go | 12 +- vendor/github.com/minio/minio-go/api.go | 88 +++--- vendor/github.com/minio/minio-go/constants.go | 10 + vendor/github.com/minio/minio-go/core.go | 30 +- .../{encryption-cbc.go => pkg/encrypt/cbc.go} | 56 ++-- .../encrypt/interface.go} | 34 ++- .../encrypt/keys.go} | 11 +- .../minio-go/pkg/policy/bucket-policy.go | 9 +- .../s3signer/request-signature-streaming.go | 285 ++++++++++++++++++ .../pkg/s3signer/request-signature-v4.go | 20 +- .../minio/minio-go/pkg/set/stringset.go | 8 +- vendor/github.com/minio/minio-go/retry.go | 12 +- vendor/github.com/minio/minio-go/s3-error.go | 60 ++++ .../minio/minio-go/signature-type.go | 6 + vendor/vendor.json | 30 +- 25 files changed, 847 insertions(+), 236 deletions(-) rename vendor/github.com/minio/minio-go/{encryption-cbc.go => pkg/encrypt/cbc.go} (80%) rename vendor/github.com/minio/minio-go/{encryption.go => pkg/encrypt/interface.go} (53%) rename vendor/github.com/minio/minio-go/{encryption-keys.go => pkg/encrypt/keys.go} (94%) create mode 100644 vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go create mode 100644 vendor/github.com/minio/minio-go/s3-error.go diff --git a/vendor/github.com/minio/minio-go/api-error-response.go 
b/vendor/github.com/minio/minio-go/api-error-response.go index fee3c7d53a..04d9a2a120 100644 --- a/vendor/github.com/minio/minio-go/api-error-response.go +++ b/vendor/github.com/minio/minio-go/api-error-response.go @@ -1,5 +1,5 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -48,6 +48,9 @@ type ErrorResponse struct { // Region where the bucket is located. This header is returned // only in HEAD bucket and ListObjects response. Region string + + // Headers of the returned S3 XML error + Headers http.Header `xml:"-" json:"-"` } // ToErrorResponse - Returns parsed ErrorResponse struct from body and @@ -72,8 +75,15 @@ func ToErrorResponse(err error) ErrorResponse { } } -// Error - Returns HTTP error string +// Error - Returns S3 error string. func (e ErrorResponse) Error() string { + if e.Message == "" { + msg, ok := s3ErrorResponseMap[e.Code] + if !ok { + msg = fmt.Sprintf("Error response code %s.", e.Code) + } + return msg + } return e.Message } @@ -91,6 +101,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) return ErrInvalidArgument(msg) } var errResp ErrorResponse + err := xmlDecoder(resp.Body, &errResp) // Xml decoding failed with no body, fall back to HTTP headers. 
if err != nil { @@ -101,9 +112,6 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) Code: "NoSuchBucket", Message: "The specified bucket does not exist.", BucketName: bucketName, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - Region: resp.Header.Get("x-amz-bucket-region"), } } else { errResp = ErrorResponse{ @@ -111,9 +119,6 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) Message: "The specified key does not exist.", BucketName: bucketName, Key: objectName, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - Region: resp.Header.Get("x-amz-bucket-region"), } } case http.StatusForbidden: @@ -122,30 +127,37 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) Message: "Access Denied.", BucketName: bucketName, Key: objectName, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - Region: resp.Header.Get("x-amz-bucket-region"), } case http.StatusConflict: errResp = ErrorResponse{ Code: "Conflict", Message: "Bucket not empty.", BucketName: bucketName, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - Region: resp.Header.Get("x-amz-bucket-region"), } default: errResp = ErrorResponse{ Code: resp.Status, Message: resp.Status, BucketName: bucketName, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - Region: resp.Header.Get("x-amz-bucket-region"), } } } + + // Save hostID, requestID and region information // from headers if not available through error XML. 
+ if errResp.RequestID == "" { + errResp.RequestID = resp.Header.Get("x-amz-request-id") + } + if errResp.HostID == "" { + errResp.HostID = resp.Header.Get("x-amz-id-2") + } + if errResp.Region == "" { + errResp.Region = resp.Header.Get("x-amz-bucket-region") + } + + // Save headers returned in the API XML error + errResp.Headers = resp.Header + return errResp } diff --git a/vendor/github.com/minio/minio-go/api-get-object.go b/vendor/github.com/minio/minio-go/api-get-object.go index ade94c2d53..8066f70f23 100644 --- a/vendor/github.com/minio/minio-go/api-get-object.go +++ b/vendor/github.com/minio/minio-go/api-get-object.go @@ -24,10 +24,12 @@ import ( "strings" "sync" "time" + + "github.com/minio/minio-go/pkg/encrypt" ) // GetEncryptedObject deciphers and streams data stored in the server after applying a specifed encryption materiels -func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMaterials EncryptionMaterials) (io.Reader, error) { if encryptMaterials == nil { return nil, ErrInvalidArgument("Unable to recognize empty encryption properties") @@ -44,8 +46,11 @@ func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMateria return nil, err } - encryptMaterials.SetupDecryptMode(encReader, st.Metadata) + // Setup object for decryption, object is transparently + // decrypted as the consumer starts reading. + encryptMaterials.SetupDecryptMode(encReader, st.Metadata.Get(amzHeaderIV), st.Metadata.Get(amzHeaderKey)) + // Success. 
return encryptMaterials, nil } diff --git a/vendor/github.com/minio/minio-go/api-get-policy.go b/vendor/github.com/minio/minio-go/api-get-policy.go index da0a409cd9..50b919017f 100644 --- a/vendor/github.com/minio/minio-go/api-get-policy.go +++ b/vendor/github.com/minio/minio-go/api-get-policy.go @@ -34,8 +34,12 @@ func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy p if err := isValidObjectPrefix(objectPrefix); err != nil { return policy.BucketPolicyNone, err } - policyInfo, err := c.getBucketPolicy(bucketName, objectPrefix) + policyInfo, err := c.getBucketPolicy(bucketName) if err != nil { + errResponse := ToErrorResponse(err) + if errResponse.Code == "NoSuchBucketPolicy" { + return policy.BucketPolicyNone, nil + } return policy.BucketPolicyNone, err } return policy.GetPolicy(policyInfo.Statements, bucketName, objectPrefix), nil @@ -50,15 +54,24 @@ func (c Client) ListBucketPolicies(bucketName, objectPrefix string) (bucketPolic if err := isValidObjectPrefix(objectPrefix); err != nil { return map[string]policy.BucketPolicy{}, err } - policyInfo, err := c.getBucketPolicy(bucketName, objectPrefix) + policyInfo, err := c.getBucketPolicy(bucketName) if err != nil { + errResponse := ToErrorResponse(err) + if errResponse.Code == "NoSuchBucketPolicy" { + return map[string]policy.BucketPolicy{}, nil + } return map[string]policy.BucketPolicy{}, err } return policy.GetPolicies(policyInfo.Statements, bucketName), nil } +// Default empty bucket access policy. +var emptyBucketAccessPolicy = policy.BucketAccessPolicy{ + Version: "2012-10-17", +} + // Request server for current bucket policy. -func (c Client) getBucketPolicy(bucketName string, objectPrefix string) (policy.BucketAccessPolicy, error) { +func (c Client) getBucketPolicy(bucketName string) (policy.BucketAccessPolicy, error) { // Get resources properly escaped and lined up before // using them in http request. 
urlValues := make(url.Values) @@ -72,21 +85,18 @@ func (c Client) getBucketPolicy(bucketName string, objectPrefix string) (policy. defer closeResponse(resp) if err != nil { - return policy.BucketAccessPolicy{}, err + return emptyBucketAccessPolicy, err } if resp != nil { if resp.StatusCode != http.StatusOK { - errResponse := httpRespToErrorResponse(resp, bucketName, "") - if ToErrorResponse(errResponse).Code == "NoSuchBucketPolicy" { - return policy.BucketAccessPolicy{Version: "2012-10-17"}, nil - } - return policy.BucketAccessPolicy{}, errResponse + return emptyBucketAccessPolicy, httpRespToErrorResponse(resp, bucketName, "") } } + bucketPolicyBuf, err := ioutil.ReadAll(resp.Body) if err != nil { - return policy.BucketAccessPolicy{}, err + return emptyBucketAccessPolicy, err } policy := policy.BucketAccessPolicy{} diff --git a/vendor/github.com/minio/minio-go/api-put-bucket.go b/vendor/github.com/minio/minio-go/api-put-bucket.go index 7c7e03f49f..11c2735a7b 100644 --- a/vendor/github.com/minio/minio-go/api-put-bucket.go +++ b/vendor/github.com/minio/minio-go/api-put-bucket.go @@ -41,7 +41,14 @@ import ( // // For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html // For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations -func (c Client) MakeBucket(bucketName string, location string) error { +func (c Client) MakeBucket(bucketName string, location string) (err error) { + defer func() { + // Save the location into cache on a successful makeBucket response. + if err == nil { + c.bucketLocCache.Set(bucketName, location) + } + }() + // Validate the input arguments. if err := isValidBucketName(bucketName); err != nil { return err @@ -52,45 +59,70 @@ func (c Client) MakeBucket(bucketName string, location string) error { location = "us-east-1" } - // Instantiate the request. 
- req, err := c.makeBucketRequest(bucketName, location) - if err != nil { - return err - } + // Try creating bucket with the provided region, in case of + // invalid region error let's guess the appropriate region + // from S3 API headers - // Execute the request. - resp, err := c.do(req) - defer closeResponse(resp) - if err != nil { - return err - } + // Create a done channel to control 'newRetryTimer' go routine. + doneCh := make(chan struct{}, 1) + + // Indicate to our routine to exit cleanly upon return. + defer close(doneCh) + + // Blank identifier is kept here on purpose since 'range' without + // blank identifiers is only supported since go1.4 + // https://golang.org/doc/go1.4#forrange. + for _ = range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) { + // Initialize the makeBucket request. + req, err := c.makeBucketRequest(bucketName, location) + if err != nil { + return err + } + + // Execute make bucket request. + resp, err := c.do(req) + defer closeResponse(resp) + if err != nil { + return err + } - if resp != nil { if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") + err := httpRespToErrorResponse(resp, bucketName, "") + errResp := ToErrorResponse(err) + if errResp.Code == "InvalidRegion" && errResp.Region != "" { + // Fetch bucket region found in headers + // of S3 error response, attempt bucket + // create again. + location = errResp.Region + continue + } + // Nothing to retry, fail. + return err } - } - // Save the location into cache on a successful makeBucket response. - c.bucketLocCache.Set(bucketName, location) + // Control reaches here when bucket create was successful, + // break out. + break + } - // Return. + // Success. return nil } -// makeBucketRequest constructs request for makeBucket. +// Low level wrapper API for makeBucketRequest. func (c Client) makeBucketRequest(bucketName string, location string) (*http.Request, error) { // Validate input arguments. 
if err := isValidBucketName(bucketName); err != nil { return nil, err } - // In case of Amazon S3. The make bucket issued on already - // existing bucket would fail with 'AuthorizationMalformed' error - // if virtual style is used. So we default to 'path style' as that - // is the preferred method here. The final location of the - // 'bucket' is provided through XML LocationConstraint data with - // the request. + // In case of Amazon S3. The make bucket issued on + // already existing bucket would fail with + // 'AuthorizationMalformed' error if virtual style is + // used. So we default to 'path style' as that is the + // preferred method here. The final location of the + // 'bucket' is provided through XML LocationConstraint + // data with the request. targetURL := c.endpointURL targetURL.Path = path.Join(bucketName, "") + "/" @@ -103,7 +135,8 @@ func (c Client) makeBucketRequest(bucketName string, location string) (*http.Req // set UserAgent for the request. c.setUserAgent(req) - // set sha256 sum for signature calculation only with signature version '4'. + // set sha256 sum for signature calculation only with + // signature version '4'. if c.signature.isV4() { req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{}))) } @@ -157,11 +190,14 @@ func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPo if err := isValidObjectPrefix(objectPrefix); err != nil { return err } + if !bucketPolicy.IsValidBucketPolicy() { return ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. 
%s", bucketPolicy)) } - policyInfo, err := c.getBucketPolicy(bucketName, objectPrefix) - if err != nil { + + policyInfo, err := c.getBucketPolicy(bucketName) + errResponse := ToErrorResponse(err) + if err != nil && errResponse.Code != "NoSuchBucketPolicy" { return err } diff --git a/vendor/github.com/minio/minio-go/api-put-object-file.go b/vendor/github.com/minio/minio-go/api-put-object-file.go index f67999f915..09fec769d7 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-file.go +++ b/vendor/github.com/minio/minio-go/api-put-object-file.go @@ -91,25 +91,11 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, objMetadata, nil) } - // NOTE: S3 doesn't allow anonymous multipart requests. - if s3utils.IsAmazonEndpoint(c.endpointURL) && c.anonymous { - if fileSize > int64(maxSinglePutObjectSize) { - return 0, ErrorResponse{ - Code: "NotImplemented", - Message: fmt.Sprintf("For anonymous requests Content-Length cannot be %d.", fileSize), - Key: objectName, - BucketName: bucketName, - } - } - // Do not compute MD5 for anonymous requests to Amazon - // S3. Uploads up to 5GiB in size. - return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, objMetadata, nil) - } - // Small object upload is initiated for uploads for input data size smaller than 5MiB. if fileSize < minPartSize && fileSize >= 0 { return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, objMetadata, nil) } + // Upload all large objects as multipart. n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, objMetadata, nil) if err != nil { @@ -187,7 +173,7 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe close(uploadPartsCh) // Use three 'workers' to upload parts in parallel. 
- for w := 1; w <= 3; w++ { + for w := 1; w <= totalWorkers; w++ { go func() { // Deal with each part as it comes through the channel. for uploadReq := range uploadPartsCh { diff --git a/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/api-put-object-multipart.go index 6ca44b2107..3a299f65b2 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-multipart.go +++ b/vendor/github.com/minio/minio-go/api-put-object-multipart.go @@ -66,6 +66,112 @@ func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Read return c.putObjectMultipartStream(bucketName, objectName, reader, size, metaData, progress) } +// putObjectMultipartStreamNoChecksum - upload a large object using +// multipart upload and streaming signature for signing payload. +// N B We don't resume an incomplete multipart upload, we overwrite +// existing parts of an incomplete upload. +func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string, + reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (int64, error) { + + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return 0, err + } + if err := isValidObjectName(objectName); err != nil { + return 0, err + } + + // Get the upload id of a previously partially uploaded object or initiate a new multipart upload + uploadID, err := c.findUploadID(bucketName, objectName) + if err != nil { + return 0, err + } + if uploadID == "" { + // Initiates a new multipart request + uploadID, err = c.newUploadID(bucketName, objectName, metadata) + if err != nil { + return 0, err + } + } + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size) + if err != nil { + return 0, err + } + + // Total data read and written to server. should be equal to 'size' at the end of the call. + var totalUploadedSize int64 + + // Initialize parts uploaded map. 
+ partsInfo := make(map[int]ObjectPart) + + // Part number always starts with '1'. + var partNumber int + for partNumber = 1; partNumber <= totalPartsCount; partNumber++ { + // Update progress reader appropriately to the latest offset + // as we read from the source. + hookReader := newHook(reader, progress) + + // Proceed to upload the part. + if partNumber == totalPartsCount { + partSize = lastPartSize + } + + var objPart ObjectPart + objPart, err = c.uploadPart(bucketName, objectName, uploadID, + io.LimitReader(hookReader, partSize), partNumber, nil, nil, partSize) + // For unknown size, Read EOF we break away. + // We do not have to upload till totalPartsCount. + if err == io.EOF && size < 0 { + break + } + + if err != nil { + return totalUploadedSize, err + } + + // Save successfully uploaded part metadata. + partsInfo[partNumber] = objPart + + // Save successfully uploaded size. + totalUploadedSize += partSize + } + + // Verify if we uploaded all the data. + if size > 0 { + if totalUploadedSize != size { + return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) + } + } + + // Complete multipart upload. + var complMultipartUpload completeMultipartUpload + + // Loop over total uploaded parts to save them in + // Parts array before completing the multipart request. + for i := 1; i < partNumber; i++ { + part, ok := partsInfo[i] + if !ok { + return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i)) + } + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload) + if err != nil { + return totalUploadedSize, err + } + + // Return final size. 
+ return totalUploadedSize, nil +} + // putObjectStream uploads files bigger than 64MiB, and also supports // special case where size is unknown i.e '-1'. func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { diff --git a/vendor/github.com/minio/minio-go/api-put-object-progress.go b/vendor/github.com/minio/minio-go/api-put-object-progress.go index 43f39c0fd2..f3844127e0 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-progress.go +++ b/vendor/github.com/minio/minio-go/api-put-object-progress.go @@ -20,6 +20,7 @@ import ( "io" "strings" + "github.com/minio/minio-go/pkg/encrypt" "github.com/minio/minio-go/pkg/s3utils" ) @@ -31,7 +32,7 @@ func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.R } // PutEncryptedObject - Encrypt and store object. -func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials EncryptionMaterials, metaData map[string][]string, progress io.Reader) (n int64, err error) { +func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials, metaData map[string][]string, progress io.Reader) (n int64, err error) { if encryptMaterials == nil { return 0, ErrInvalidArgument("Unable to recognize empty encryption properties") @@ -44,9 +45,11 @@ func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Read if metaData == nil { metaData = make(map[string][]string) } - for k, v := range encryptMaterials.GetHeaders() { - metaData[k] = v - } + + // Set the necessary encryption headers, for future decryption. 
+ metaData[amzHeaderIV] = []string{encryptMaterials.GetIV()} + metaData[amzHeaderKey] = []string{encryptMaterials.GetKey()} + metaData[amzHeaderMatDesc] = []string{encryptMaterials.GetDesc()} return c.PutObjectWithMetadata(bucketName, objectName, encryptMaterials, metaData, progress) } @@ -96,24 +99,6 @@ func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.R return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress) } - // NOTE: S3 doesn't allow anonymous multipart requests. - if s3utils.IsAmazonEndpoint(c.endpointURL) && c.anonymous { - if size <= -1 { - return 0, ErrorResponse{ - Code: "NotImplemented", - Message: "Content-Length cannot be negative for anonymous requests.", - Key: objectName, - BucketName: bucketName, - } - } - if size > maxSinglePutObjectSize { - return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) - } - // Do not compute MD5 for anonymous requests to Amazon - // S3. Uploads up to 5GiB in size. - return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress) - } - // putSmall object. 
if size < minPartSize && size >= 0 { return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress) @@ -136,3 +121,81 @@ func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.R } return n, nil } + +// PutObjectStreaming using AWS streaming signature V4 +func (c Client) PutObjectStreaming(bucketName, objectName string, reader io.Reader) (n int64, err error) { + return c.PutObjectStreamingWithProgress(bucketName, objectName, reader, nil, nil) +} + +// PutObjectStreamingWithMetadata using AWS streaming signature V4 +func (c Client) PutObjectStreamingWithMetadata(bucketName, objectName string, reader io.Reader, metadata map[string][]string) (n int64, err error) { + return c.PutObjectStreamingWithProgress(bucketName, objectName, reader, metadata, nil) +} + +// PutObjectStreamingWithProgress using AWS streaming signature V4 +func (c Client) PutObjectStreamingWithProgress(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) { + // NOTE: Streaming signature is not supported by GCS. + if s3utils.IsGoogleEndpoint(c.endpointURL) { + return 0, ErrorResponse{ + Code: "NotImplemented", + Message: "AWS streaming signature v4 is not supported with Google Cloud Storage", + Key: objectName, + BucketName: bucketName, + } + } + // This method should return error with signature v2 minioClient. + if c.signature.isV2() { + return 0, ErrorResponse{ + Code: "NotImplemented", + Message: "AWS streaming signature v4 is not supported with minio client initialized for AWS signature v2", + Key: objectName, + BucketName: bucketName, + } + } + + // Size of the object. + var size int64 + + // Get reader size. + size, err = getReaderSize(reader) + if err != nil { + return 0, err + } + + // Check for largest object size allowed. 
+ if size > int64(maxMultipartPutObjectSize) { + return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) + } + + // If size cannot be found on a stream, it is not possible + // to upload using streaming signature, fall back to multipart. + if size < 0 { + return c.putObjectMultipartStream(bucketName, objectName, reader, size, metadata, progress) + } + + // Set signature type to streaming signature v4. + c.signature = SignatureV4Streaming + + if size < minPartSize && size >= 0 { + return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) + } + + // For all sizes greater than 64MiB do multipart. + n, err = c.putObjectMultipartStreamNoChecksum(bucketName, objectName, reader, size, metadata, progress) + if err != nil { + errResp := ToErrorResponse(err) + // Verify if multipart functionality is not available, if not + // fall back to single PutObject operation. + if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { + // Verify if size of reader is greater than '5GiB'. + if size > maxSinglePutObjectSize { + return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + } + // Fall back to uploading as single PutObject operation. + return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) + } + return n, err + } + + return n, nil +} diff --git a/vendor/github.com/minio/minio-go/api-put-object-readat.go b/vendor/github.com/minio/minio-go/api-put-object-readat.go index c2cf56e0d1..ebf4226384 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-readat.go +++ b/vendor/github.com/minio/minio-go/api-put-object-readat.go @@ -115,7 +115,7 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read close(uploadPartsCh) // Receive each part number from the channel allowing three parallel uploads. 
- for w := 1; w <= 3; w++ { + for w := 1; w <= totalWorkers; w++ { go func() { // Read defaults to reading at 5MiB buffer. readAtBuffer := make([]byte, optimalReadBufferSize) diff --git a/vendor/github.com/minio/minio-go/api-put-object.go b/vendor/github.com/minio/minio-go/api-put-object.go index bc2eeb2523..e218075dfa 100644 --- a/vendor/github.com/minio/minio-go/api-put-object.go +++ b/vendor/github.com/minio/minio-go/api-put-object.go @@ -143,7 +143,6 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part // NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT. // So we fall back to single PUT operation with the maximum limit of 5GiB. // -// NOTE: For anonymous requests Amazon S3 doesn't allow multipart upload. So we fall back to single PUT operation. func (c Client) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) { return c.PutObjectWithProgress(bucketName, objectName, reader, contentType, nil) } diff --git a/vendor/github.com/minio/minio-go/api-remove.go b/vendor/github.com/minio/minio-go/api-remove.go index 2ca84458ef..68194887a7 100644 --- a/vendor/github.com/minio/minio-go/api-remove.go +++ b/vendor/github.com/minio/minio-go/api-remove.go @@ -208,7 +208,6 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan } // RemoveIncompleteUpload aborts an partially uploaded object. -// Requires explicit authentication, no anonymous requests are allowed for multipart API. func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error { // Input validation. 
if err := isValidBucketName(bucketName); err != nil { diff --git a/vendor/github.com/minio/minio-go/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/api-s3-datatypes.go index 5bd0bcc56e..ec63d6b946 100644 --- a/vendor/github.com/minio/minio-go/api-s3-datatypes.go +++ b/vendor/github.com/minio/minio-go/api-s3-datatypes.go @@ -41,7 +41,7 @@ type commonPrefix struct { Prefix string } -// listBucketResult container for listObjects V2 response. +// ListBucketV2Result container for listObjects response version 2. type ListBucketV2Result struct { // A response can contain CommonPrefixes only if you have // specified a delimiter. @@ -70,7 +70,7 @@ type ListBucketV2Result struct { StartAfter string } -// listBucketResult container for listObjects response. +// ListBucketResult container for listObjects response. type ListBucketResult struct { // A response can contain CommonPrefixes only if you have // specified a delimiter. @@ -102,7 +102,7 @@ type ListBucketResult struct { Prefix string } -// listMultipartUploadsResult container for ListMultipartUploads response +// ListMultipartUploadsResult container for ListMultipartUploads response type ListMultipartUploadsResult struct { Bucket string KeyMarker string @@ -131,7 +131,7 @@ type copyObjectResult struct { LastModified string // time string format "2006-01-02T15:04:05.000Z" } -// objectPart container for particular part of an object. +// ObjectPart container for particular part of an object. type ObjectPart struct { // Part number identifies the part. PartNumber int @@ -147,7 +147,7 @@ type ObjectPart struct { Size int64 } -// listObjectPartsResult container for ListObjectParts response. +// ListObjectPartsResult container for ListObjectParts response. 
type ListObjectPartsResult struct { Bucket string Key string @@ -185,7 +185,7 @@ type completeMultipartUploadResult struct { ETag string } -// completePart sub container lists individual part numbers and their +// CompletePart sub container lists individual part numbers and their // md5sum, part of completeMultipartUpload. type CompletePart struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"` diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go index d5821c0143..c851e01694 100644 --- a/vendor/github.com/minio/minio-go/api.go +++ b/vendor/github.com/minio/minio-go/api.go @@ -20,6 +20,7 @@ import ( "bytes" "encoding/base64" "encoding/hex" + "errors" "fmt" "io" "io/ioutil" @@ -198,9 +199,6 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Cl clnt := new(Client) clnt.accessKeyID = accessKeyID clnt.secretAccessKey = secretAccessKey - if clnt.accessKeyID == "" || clnt.secretAccessKey == "" { - clnt.anonymous = true - } // Remember whether we are using https or not clnt.secure = secure @@ -316,8 +314,7 @@ var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)") // Filter out signature value from Authorization header. func (c Client) filterSignature(req *http.Request) { - // For anonymous requests, no need to filter. - if c.anonymous { + if _, ok := req.Header["Authorization"]; !ok { return } // Handle if Signature V2. @@ -428,7 +425,7 @@ func (c Client) do(req *http.Request) (*http.Response, error) { return nil, &url.Error{ Op: urlErr.Op, URL: urlErr.URL, - Err: fmt.Errorf("Connection closed by foreign host %s. Retry again.", urlErr.URL), + Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."), } } return nil, err @@ -477,9 +474,13 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt if metadata.contentBody != nil { // Check if body is seekable then it is retryable. 
bodySeeker, isRetryable = metadata.contentBody.(io.Seeker) + switch bodySeeker { + case os.Stdin, os.Stdout, os.Stderr: + isRetryable = false + } } - // Create a done channel to control 'ListObjects' go routine. + // Create a done channel to control 'newRetryTimer' go routine. doneCh := make(chan struct{}, 1) // Indicate to our routine to exit cleanly upon return. @@ -488,7 +489,7 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt // Blank indentifier is kept here on purpose since 'range' without // blank identifiers is only supported since go1.4 // https://golang.org/doc/go1.4#forrange. - for _ = range c.newRetryTimer(MaxRetry, time.Second, time.Second*30, MaxJitter, doneCh) { + for _ = range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) { // Retry executes the following function body if request has an // error until maxRetries have been exhausted, retry attempts are // performed after waiting for a given period of time in a @@ -537,14 +538,21 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt if err != nil { return nil, err } + // Save the body. errBodySeeker := bytes.NewReader(errBodyBytes) res.Body = ioutil.NopCloser(errBodySeeker) // For errors verify if its retryable otherwise fail quickly. errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName)) - // Bucket region if set in error response and the error code dictates invalid region, - // we can retry the request with the new region. + + // Save the body back again. + errBodySeeker.Seek(0, 0) // Seek back to starting point. + res.Body = ioutil.NopCloser(errBodySeeker) + + // Bucket region if set in error response and the error + // code dictates invalid region, we can retry the request + // with the new region. if errResponse.Code == "InvalidRegion" && errResponse.Region != "" { c.bucketLocCache.Set(metadata.bucketName, errResponse.Region) continue // Retry. 
@@ -560,10 +568,6 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt continue // Retry. } - // Save the body back again. - errBodySeeker.Seek(0, 0) // Seek back to starting point. - res.Body = ioutil.NopCloser(errBodySeeker) - // For all other cases break out of the retry loop. break } @@ -610,15 +614,18 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R return nil, err } + // Anonymous request. + anonymous := c.accessKeyID == "" || c.secretAccessKey == "" + // Generate presign url if needed, return right here. if metadata.expires != 0 && metadata.presignURL { - if c.anonymous { + if anonymous { return nil, ErrInvalidArgument("Requests cannot be presigned with anonymous credentials.") } if c.signature.isV2() { // Presign URL with signature v2. req = s3signer.PreSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires) - } else { + } else if c.signature.isV4() { // Presign URL with signature v4. req = s3signer.PreSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires) } @@ -638,37 +645,36 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R req.ContentLength = metadata.contentLength } - // Set sha256 sum only for non anonymous credentials. - if !c.anonymous { - // set sha256 sum for signature calculation only with - // signature version '4'. - if c.signature.isV4() { - shaHeader := unsignedPayload - if !c.secure { - if metadata.contentSHA256Bytes == nil { - shaHeader = hex.EncodeToString(sum256([]byte{})) - } else { - shaHeader = hex.EncodeToString(metadata.contentSHA256Bytes) - } - } - req.Header.Set("X-Amz-Content-Sha256", shaHeader) - } - } - // set md5Sum for content protection. if metadata.contentMD5Bytes != nil { req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes)) } - // Sign the request for all authenticated requests. 
- if !c.anonymous { - if c.signature.isV2() { - // Add signature version '2' authorization header. - req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey) - } else if c.signature.isV4() { - // Add signature version '4' authorization header. - req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, location) + if anonymous { + return req, nil + } // Sign the request for all authenticated requests. + + if c.signature.isV2() { + // Add signature version '2' authorization header. + req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey) + } else if c.signature.isV4() || c.signature.isStreamingV4() && + method != "PUT" { + // Set sha256 sum for signature calculation only with signature version '4'. + shaHeader := unsignedPayload + if !c.secure { + if metadata.contentSHA256Bytes == nil { + shaHeader = hex.EncodeToString(sum256([]byte{})) + } else { + shaHeader = hex.EncodeToString(metadata.contentSHA256Bytes) + } } + req.Header.Set("X-Amz-Content-Sha256", shaHeader) + + // Add signature version '4' authorization header. + req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, location) + } else if c.signature.isStreamingV4() { + req = s3signer.StreamingSignV4(req, c.accessKeyID, + c.secretAccessKey, location, metadata.contentLength, time.Now().UTC()) } // Return request. diff --git a/vendor/github.com/minio/minio-go/constants.go b/vendor/github.com/minio/minio-go/constants.go index 057c3eef4d..6055bfdad6 100644 --- a/vendor/github.com/minio/minio-go/constants.go +++ b/vendor/github.com/minio/minio-go/constants.go @@ -45,8 +45,18 @@ const optimalReadBufferSize = 1024 * 1024 * 5 // we don't want to sign the request payload const unsignedPayload = "UNSIGNED-PAYLOAD" +// Total number of parallel workers used for multipart operation. +var totalWorkers = 3 + // Signature related constants. const ( signV4Algorithm = "AWS4-HMAC-SHA256" iso8601DateFormat = "20060102T150405Z" ) + +// Encryption headers stored along with the object. 
+const ( + amzHeaderIV = "X-Amz-Meta-X-Amz-Iv" + amzHeaderKey = "X-Amz-Meta-X-Amz-Key" + amzHeaderMatDesc = "X-Amz-Meta-X-Amz-Matdesc" +) diff --git a/vendor/github.com/minio/minio-go/core.go b/vendor/github.com/minio/minio-go/core.go index ba5f8ef7c4..90154d945b 100644 --- a/vendor/github.com/minio/minio-go/core.go +++ b/vendor/github.com/minio/minio-go/core.go @@ -16,14 +16,20 @@ package minio -import "io" +import ( + "io" -// Inherits Client and adds new methods to expose the low level S3 APIs. + "github.com/minio/minio-go/pkg/policy" +) + +// Core - Inherits Client and adds new methods to expose the low level S3 APIs. type Core struct { *Client } -// NewCoreClient - Returns new Core. +// NewCore - Returns a new initialized Core client, this CoreClient should be +// only used under special conditions such as need to access lower primitives +// and being able to use them to write your own wrappers. func NewCore(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Core, error) { var s3Client Core client, err := NewV4(endpoint, accessKeyID, secretAccessKey, secure) @@ -34,12 +40,14 @@ func NewCore(endpoint string, accessKeyID, secretAccessKey string, secure bool) return &s3Client, nil } -// ListObjects - List the objects. +// ListObjects - List all the objects at a prefix, optionally with marker and delimiter +// you can further filter the results. func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) { return c.listObjectsQuery(bucket, prefix, marker, delimiter, maxKeys) } -// ListObjectsV2 - List the objects. +// ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses +// continuationToken instead of marker to further filter the results.
func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) { return c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, maxkeys) } @@ -65,7 +73,7 @@ func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, size in return c.uploadPart(bucket, object, uploadID, data, partID, md5Sum, sha256Sum, size) } -// ListObjectParts - List uploaded parts of an incomplete upload. +// ListObjectParts - List uploaded parts of an incomplete upload. func (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListObjectPartsResult, err error) { return c.listObjectPartsQuery(bucket, object, uploadID, partNumberMarker, maxParts) } @@ -80,3 +88,13 @@ func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []C func (c Core) AbortMultipartUpload(bucket, object, uploadID string) error { return c.abortMultipartUpload(bucket, object, uploadID) } + +// GetBucketPolicy - fetches bucket access policy for a given bucket. +func (c Core) GetBucketPolicy(bucket string) (policy.BucketAccessPolicy, error) { + return c.getBucketPolicy(bucket) +} + +// PutBucketPolicy - applies a new bucket access policy for a given bucket. +func (c Core) PutBucketPolicy(bucket string, bucketPolicy policy.BucketAccessPolicy) error { + return c.putBucketPolicy(bucket, bucketPolicy) +} diff --git a/vendor/github.com/minio/minio-go/encryption-cbc.go b/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go similarity index 80% rename from vendor/github.com/minio/minio-go/encryption-cbc.go rename to vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go index 22a2d6c245..7670e68f4b 100644 --- a/vendor/github.com/minio/minio-go/encryption-cbc.go +++ b/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go @@ -14,7 +14,7 @@ * limitations under the License.
*/ -package minio +package encrypt import ( "bytes" @@ -24,13 +24,6 @@ import ( "encoding/base64" "errors" "io" - "net/http" -) - -const ( - amzHeaderIV = "X-Amz-Meta-X-Amz-Iv" - amzHeaderKey = "X-Amz-Meta-X-Amz-Key" - amzHeaderMatDesc = "X-Amz-Meta-X-Amz-Matdesc" ) // Crypt mode - encryption or decryption @@ -60,7 +53,7 @@ type CBCSecureMaterials struct { dstBuf *bytes.Buffer // Encryption algorithm - encryptionKey EncryptionKey + encryptionKey Key // Key to encrypts/decrypts data contentKey []byte @@ -81,11 +74,11 @@ type CBCSecureMaterials struct { blockMode cipher.BlockMode } -// NewCBCSecureMaterials builds new CBC crypter module with the specified encryption key -// (symmetric or asymmetric) -func NewCBCSecureMaterials(key EncryptionKey) (*CBCSecureMaterials, error) { +// NewCBCSecureMaterials builds new CBC crypter module with +// the specified encryption key (symmetric or asymmetric) +func NewCBCSecureMaterials(key Key) (*CBCSecureMaterials, error) { if key == nil { - return nil, ErrInvalidArgument("Unable to recognize empty encryption properties") + return nil, errors.New("Unable to recognize empty encryption properties") } return &CBCSecureMaterials{ srcBuf: bytes.NewBuffer([]byte{}), @@ -96,14 +89,8 @@ func NewCBCSecureMaterials(key EncryptionKey) (*CBCSecureMaterials, error) { } -// SetInputStream - set data which needs to be encrypted or decrypted -// func (s *CBCSecureMaterials) SetInputStream(stream io.Reader) { -// s.stream = stream -// } - -// SetEncryptMode - tells CBC that we are going to encrypt data +// SetupEncryptMode - tells CBC that we are going to encrypt data func (s *CBCSecureMaterials) SetupEncryptMode(stream io.Reader) error { - // Set mode to encrypt s.cryptMode = encryptMode @@ -143,8 +130,7 @@ func (s *CBCSecureMaterials) SetupEncryptMode(stream io.Reader) error { } // SetupDecryptMode - tells CBC that we are going to decrypt data -func (s *CBCSecureMaterials) SetupDecryptMode(stream io.Reader, metadata http.Header) error { - +func 
(s *CBCSecureMaterials) SetupDecryptMode(stream io.Reader, iv string, key string) error { // Set mode to decrypt s.cryptMode = decryptMode @@ -159,15 +145,17 @@ func (s *CBCSecureMaterials) SetupDecryptMode(stream io.Reader, metadata http.He var err error // Get IV - s.iv, err = base64.StdEncoding.DecodeString(metadata.Get(amzHeaderIV)) + s.iv, err = base64.StdEncoding.DecodeString(iv) if err != nil { return err } + // Get encrypted content key - s.cryptedKey, err = base64.StdEncoding.DecodeString(metadata.Get(amzHeaderKey)) + s.cryptedKey, err = base64.StdEncoding.DecodeString(key) if err != nil { return err } + // Decrypt content key s.contentKey, err = s.encryptionKey.Decrypt(s.cryptedKey) if err != nil { @@ -184,17 +172,19 @@ func (s *CBCSecureMaterials) SetupDecryptMode(stream io.Reader, metadata http.He return nil } -// GetHeaders - returns headers data to be sent along with data to the S3 server -// it contains encryption information needed to decrypt later -func (s *CBCSecureMaterials) GetHeaders() http.Header { - - m := make(http.Header) +// GetIV - return randomly generated IV (per S3 object), base64 encoded. +func (s *CBCSecureMaterials) GetIV() string { + return base64.StdEncoding.EncodeToString(s.iv) +} - m.Set(amzHeaderMatDesc, string(s.matDesc)) - m.Set(amzHeaderIV, base64.StdEncoding.EncodeToString(s.iv)) - m.Set(amzHeaderKey, base64.StdEncoding.EncodeToString(s.cryptedKey)) +// GetKey - return content encrypting key (cek) in encrypted form, base64 encoded. +func (s *CBCSecureMaterials) GetKey() string { + return base64.StdEncoding.EncodeToString(s.cryptedKey) +} - return m +// GetDesc - user provided encryption material description in JSON (UTF8) format. 
+func (s *CBCSecureMaterials) GetDesc() string { + return string(s.matDesc) } // Fill buf with encrypted/decrypted data diff --git a/vendor/github.com/minio/minio-go/encryption.go b/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go similarity index 53% rename from vendor/github.com/minio/minio-go/encryption.go rename to vendor/github.com/minio/minio-go/pkg/encrypt/interface.go index 3a7843c571..2fd75033ff 100644 --- a/vendor/github.com/minio/minio-go/encryption.go +++ b/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go @@ -14,31 +14,37 @@ * limitations under the License. */ -package minio +// Package encrypt implements a generic interface to encrypt any stream of data. +// currently this package implements two types of encryption +// - Symmetric encryption using AES. +// - Asymmetric encryption using RSA. +package encrypt -import ( - "io" - "net/http" -) +import "io" -// EncryptionMaterials - provides generic interface to encrypt -// any stream of data. Some crypt information can be -// save in the object metadata -type EncryptionMaterials interface { +// Materials - provides generic interface to encrypt any stream of data. +type Materials interface { - // Return encrypted/decrypted data. + // Returns encrypted/decrypted data, io.Reader compatible. Read(b []byte) (int, error) - // Metadata that will be stored with the object - GetHeaders() http.Header + // Get randomly generated IV, base64 encoded. + GetIV() (iv string) + + // Get content encrypting key (cek) in encrypted form, base64 encoded. + GetKey() (key string) + + // Get user provided encryption material description in + // JSON (UTF8) format. This is not used, kept for future.
+ GetDesc() (desc string) // Setup encrypt mode, further calls of Read() function // will return the encrypted form of data streamed // by the passed reader - SetupEncryptMode(io.Reader) error + SetupEncryptMode(stream io.Reader) error // Setup decrypted mode, further calls of Read() function // will return the decrypted form of data streamed // by the passed reader - SetupDecryptMode(io.Reader, http.Header) error + SetupDecryptMode(stream io.Reader, iv string, key string) error } diff --git a/vendor/github.com/minio/minio-go/encryption-keys.go b/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go similarity index 94% rename from vendor/github.com/minio/minio-go/encryption-keys.go rename to vendor/github.com/minio/minio-go/pkg/encrypt/keys.go index 4d6a5a14ec..8814845e30 100644 --- a/vendor/github.com/minio/minio-go/encryption-keys.go +++ b/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go @@ -14,19 +14,20 @@ * limitations under the License. */ -package minio +package encrypt import ( "crypto/aes" "crypto/rand" "crypto/rsa" "crypto/x509" + "errors" ) -// EncryptionKey - generic interface to encrypt/decrypt a key. +// Key - generic interface to encrypt/decrypt a key. // We use it to encrypt/decrypt content key which is the key // that encrypt/decrypt object data. 
-type EncryptionKey interface { +type Key interface { // Encrypt data using to the set encryption key Encrypt([]byte) ([]byte, error) // Decrypt data using to the set encryption key @@ -140,7 +141,7 @@ func NewAsymmetricKey(privData []byte, pubData []byte) (*AsymmetricKey, error) { } privKey, ok := priv.(*rsa.PrivateKey) if !ok { - return nil, ErrInvalidArgument("not a valid private key") + return nil, errors.New("not a valid private key") } // Parse public key from passed data @@ -151,7 +152,7 @@ func NewAsymmetricKey(privData []byte, pubData []byte) (*AsymmetricKey, error) { pubKey, ok := pub.(*rsa.PublicKey) if !ok { - return nil, ErrInvalidArgument("not a valid public key") + return nil, errors.New("not a valid public key") } // Associate the private key with the passed public key diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go index f618059cf5..cbb889d8d3 100644 --- a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go +++ b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go @@ -34,7 +34,7 @@ const ( BucketPolicyWriteOnly = "writeonly" ) -// isValidBucketPolicy - Is provided policy value supported. +// IsValidBucketPolicy - returns true if policy is valid and supported, false otherwise. func (p BucketPolicy) IsValidBucketPolicy() bool { switch p { case BucketPolicyNone, BucketPolicyReadOnly, BucketPolicyReadWrite, BucketPolicyWriteOnly: @@ -508,7 +508,7 @@ func getObjectPolicy(statement Statement) (readOnly bool, writeOnly bool) { return readOnly, writeOnly } -// Returns policy of given bucket name, prefix in given statements. +// GetPolicy - Returns policy of given bucket name, prefix in given statements. 
func GetPolicy(statements []Statement, bucketName string, prefix string) BucketPolicy { bucketResource := awsResourcePrefix + bucketName objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*" @@ -563,7 +563,7 @@ func GetPolicy(statements []Statement, bucketName string, prefix string) BucketP return policy } -// GetPolicies returns a map of policies rules of given bucket name, prefix in given statements. +// GetPolicies - returns a map of policies rules of given bucket name, prefix in given statements. func GetPolicies(statements []Statement, bucketName string) map[string]BucketPolicy { policyRules := map[string]BucketPolicy{} objResources := set.NewStringSet() @@ -590,8 +590,7 @@ func GetPolicies(statements []Statement, bucketName string) map[string]BucketPol return policyRules } -// Returns new statements containing policy of given bucket name and -// prefix are appended. +// SetPolicy - Returns new statements containing policy of given bucket name and prefix are appended. func SetPolicy(statements []Statement, policy BucketPolicy, bucketName string, prefix string) []Statement { out := removeStatements(statements, bucketName, prefix) // fmt.Println("out = ") diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go new file mode 100644 index 0000000000..755fd1ac55 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go @@ -0,0 +1,285 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package s3signer + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +// Reference for constants used below - +// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming +const ( + streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + streamingEncoding = "aws-chunked" + streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" + emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + payloadChunkSize = 64 * 1024 + chunkSigConstLen = 17 // ";chunk-signature=" + signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" + crlfLen = 2 // CRLF +) + +// Request headers to be ignored while calculating seed signature for +// a request. 
+var ignoredStreamingHeaders = map[string]bool{ + "Authorization": true, + "User-Agent": true, + "Content-Type": true, +} + +// getSignedChunkLength - calculates the length of chunk metadata +func getSignedChunkLength(chunkDataSize int64) int64 { + return int64(len(fmt.Sprintf("%x", chunkDataSize))) + + chunkSigConstLen + + signatureStrLen + + crlfLen + + chunkDataSize + + crlfLen +} + +// getStreamLength - calculates the length of the overall stream (data + metadata) +func getStreamLength(dataLen, chunkSize int64) int64 { + if dataLen <= 0 { + return 0 + } + + chunksCount := int64(dataLen / chunkSize) + remainingBytes := int64(dataLen % chunkSize) + streamLen := int64(0) + streamLen += chunksCount * getSignedChunkLength(chunkSize) + if remainingBytes > 0 { + streamLen += getSignedChunkLength(remainingBytes) + } + streamLen += getSignedChunkLength(0) + return streamLen +} + +// buildChunkStringToSign - returns the string to sign given chunk data +// and previous signature. +func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string { + stringToSignParts := []string{ + streamingPayloadHdr, + t.Format(iso8601DateFormat), + getScope(region, t), + previousSig, + emptySHA256, + hex.EncodeToString(sum256(chunkData)), + } + + return strings.Join(stringToSignParts, "\n") +} + +// prepareStreamingRequest - prepares a request with appropriate +// headers before computing the seed signature. +func prepareStreamingRequest(req *http.Request, dataLen int64, timestamp time.Time) { + // Set x-amz-content-sha256 header. + req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) + req.Header.Set("Content-Encoding", streamingEncoding) + req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) + + // Set content length with streaming signature for each chunk included. 
+ req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize)) + req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10)) +} + +// buildChunkHeader - returns the chunk header. +// e.g string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n +func buildChunkHeader(chunkLen int64, signature string) []byte { + return []byte(strconv.FormatInt(chunkLen, 16) + ";chunk-signature=" + signature + "\r\n") +} + +// buildChunkSignature - returns chunk signature for a given chunk and previous signature. +func buildChunkSignature(chunkData []byte, reqTime time.Time, region, + previousSignature, secretAccessKey string) string { + + chunkStringToSign := buildChunkStringToSign(reqTime, region, + previousSignature, chunkData) + signingKey := getSigningKey(secretAccessKey, region, reqTime) + return getSignature(signingKey, chunkStringToSign) +} + +// getSeedSignature - returns the seed signature for a given request. +func (s *StreamingReader) setSeedSignature(req *http.Request) { + // Get canonical request + canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders) + + // Get string to sign from canonical request. + stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest) + + signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime) + + // Calculate signature. + s.seedSignature = getSignature(signingKey, stringToSign) +} + +// StreamingReader implements chunked upload signature as a reader on +// top of req.Body's ReaderCloser chunk header;data;... repeat +type StreamingReader struct { + accessKeyID string + secretAccessKey string + region string + prevSignature string + seedSignature string + contentLen int64 // Content-Length from req header + baseReadCloser io.ReadCloser // underlying io.Reader + bytesRead int64 // bytes read from underlying io.Reader + buf bytes.Buffer // holds signed chunk + chunkBuf []byte // holds raw data read from req Body + chunkBufLen int // no. 
of bytes read so far into chunkBuf + done bool // done reading the underlying reader to EOF + reqTime time.Time + chunkNum int + totalChunks int + lastChunkSize int +} + +// signChunk - signs a chunk read from s.baseReader of chunkLen size. +func (s *StreamingReader) signChunk(chunkLen int) { + // Compute chunk signature for next header + signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime, + s.region, s.prevSignature, s.secretAccessKey) + + // For next chunk signature computation + s.prevSignature = signature + + // Write chunk header into streaming buffer + chunkHdr := buildChunkHeader(int64(chunkLen), signature) + s.buf.Write(chunkHdr) + + // Write chunk data into streaming buffer + s.buf.Write(s.chunkBuf[:chunkLen]) + + // Write the chunk trailer. + s.buf.Write([]byte("\r\n")) + + // Reset chunkBufLen for next chunk read. + s.chunkBufLen = 0 + s.chunkNum++ +} + +// setStreamingAuthHeader - builds and sets authorization header value +// for streaming signature. +func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) { + credential := GetCredential(s.accessKeyID, s.region, s.reqTime) + authParts := []string{ + signV4Algorithm + " Credential=" + credential, + "SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders), + "Signature=" + s.seedSignature, + } + + // Set authorization header. + auth := strings.Join(authParts, ",") + req.Header.Set("Authorization", auth) +} + +// StreamingSignV4 - provides chunked upload signatureV4 support by +// implementing io.Reader. +func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, + region string, dataLen int64, reqTime time.Time) *http.Request { + + // Set headers needed for streaming signature. 
+ prepareStreamingRequest(req, dataLen, reqTime) + + stReader := &StreamingReader{ + baseReadCloser: req.Body, + accessKeyID: accessKeyID, + secretAccessKey: secretAccessKey, + region: region, + reqTime: reqTime, + chunkBuf: make([]byte, payloadChunkSize), + contentLen: dataLen, + chunkNum: 1, + totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, + lastChunkSize: int(dataLen % payloadChunkSize), + } + + // Add the request headers required for chunk upload signing. + + // Compute the seed signature. + stReader.setSeedSignature(req) + + // Set the authorization header with the seed signature. + stReader.setStreamingAuthHeader(req) + + // Set seed signature as prevSignature for subsequent + // streaming signing process. + stReader.prevSignature = stReader.seedSignature + req.Body = stReader + + return req +} + +// Read - this method performs chunk upload signature providing a +// io.Reader interface. +func (s *StreamingReader) Read(buf []byte) (int, error) { + switch { + // After the last chunk is read from underlying reader, we + // never re-fill s.buf. + case s.done: + + // s.buf will be (re-)filled with next chunk when has lesser + // bytes than asked for. + case s.buf.Len() < len(buf): + s.chunkBufLen = 0 + for { + n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:]) + if err == nil || err == io.ErrUnexpectedEOF { + s.chunkBufLen += n1 + s.bytesRead += int64(n1) + + if s.chunkBufLen == payloadChunkSize || + (s.chunkNum == s.totalChunks-1 && + s.chunkBufLen == s.lastChunkSize) { + // Sign the chunk and write it to s.buf. + s.signChunk(s.chunkBufLen) + break + } + + } else if err == io.EOF { + // No more data left in baseReader - last chunk. + // Done reading the last chunk from baseReader. + s.done = true + + // bytes read from baseReader different than + // content length provided. + if s.bytesRead != s.contentLen { + return 0, io.ErrUnexpectedEOF + } + + // Sign the chunk and write it to s.buf. 
+ s.signChunk(0) + break + + } else { + return 0, err + } + } + } + return s.buf.Read(buf) +} + +// Close - this method makes underlying io.ReadCloser's Close method available. +func (s *StreamingReader) Close() error { + return s.baseReadCloser.Close() +} diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go index 3322b67ccf..245fb08c3d 100644 --- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go @@ -70,7 +70,7 @@ const ( /// /// Is skipped for obvious reasons /// -var ignoredHeaders = map[string]bool{ +var v4IgnoredHeaders = map[string]bool{ "Authorization": true, "Content-Type": true, "Content-Length": true, @@ -122,7 +122,7 @@ func getHashedPayload(req http.Request) string { // getCanonicalHeaders generate a list of request headers for // signature. -func getCanonicalHeaders(req http.Request) string { +func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string { var headers []string vals := make(map[string][]string) for k, vv := range req.Header { @@ -161,7 +161,7 @@ func getCanonicalHeaders(req http.Request) string { // getSignedHeaders generate all signed request headers. // i.e lexically sorted, semicolon-separated list of lowercase // request header names. 
-func getSignedHeaders(req http.Request) string { +func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string { var headers []string for k := range req.Header { if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { @@ -183,14 +183,14 @@ func getSignedHeaders(req http.Request) string { // \n // \n // -func getCanonicalRequest(req http.Request) string { +func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool) string { req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1) canonicalRequest := strings.Join([]string{ req.Method, s3utils.EncodePath(req.URL.Path), req.URL.RawQuery, - getCanonicalHeaders(req), - getSignedHeaders(req), + getCanonicalHeaders(req, ignoredHeaders), + getSignedHeaders(req, ignoredHeaders), getHashedPayload(req), }, "\n") return canonicalRequest @@ -219,7 +219,7 @@ func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string, credential := GetCredential(accessKeyID, location, t) // Get all signed headers. - signedHeaders := getSignedHeaders(req) + signedHeaders := getSignedHeaders(req, v4IgnoredHeaders) // Set URL query. query := req.URL.Query() @@ -231,7 +231,7 @@ func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string, req.URL.RawQuery = query.Encode() // Get canonical request. - canonicalRequest := getCanonicalRequest(req) + canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders) // Get string to sign from canonical request. stringToSign := getStringToSignV4(t, location, canonicalRequest) @@ -273,7 +273,7 @@ func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *ht req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat)) // Get canonical request. - canonicalRequest := getCanonicalRequest(req) + canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders) // Get string to sign from canonical request. 
stringToSign := getStringToSignV4(t, location, canonicalRequest) @@ -285,7 +285,7 @@ func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *ht credential := GetCredential(accessKeyID, location, t) // Get all signed headers. - signedHeaders := getSignedHeaders(req) + signedHeaders := getSignedHeaders(req, v4IgnoredHeaders) // Calculate signature. signature := getSignature(signingKey, stringToSign) diff --git a/vendor/github.com/minio/minio-go/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/pkg/set/stringset.go index 55084d4616..9f33488e01 100644 --- a/vendor/github.com/minio/minio-go/pkg/set/stringset.go +++ b/vendor/github.com/minio/minio-go/pkg/set/stringset.go @@ -25,8 +25,8 @@ import ( // StringSet - uses map as set of strings. type StringSet map[string]struct{} -// keys - returns StringSet keys. -func (set StringSet) keys() []string { +// ToSlice - returns StringSet as string slice. +func (set StringSet) ToSlice() []string { keys := make([]string, 0, len(set)) for k := range set { keys = append(keys, k) @@ -141,7 +141,7 @@ func (set StringSet) Union(sset StringSet) StringSet { // MarshalJSON - converts to JSON data. func (set StringSet) MarshalJSON() ([]byte, error) { - return json.Marshal(set.keys()) + return json.Marshal(set.ToSlice()) } // UnmarshalJSON - parses JSON data and creates new set with it. @@ -169,7 +169,7 @@ func (set *StringSet) UnmarshalJSON(data []byte) error { // String - returns printable string of the set. func (set StringSet) String() string { - return fmt.Sprintf("%s", set.keys()) + return fmt.Sprintf("%s", set.ToSlice()) } // NewStringSet - creates new string set. 
diff --git a/vendor/github.com/minio/minio-go/retry.go b/vendor/github.com/minio/minio-go/retry.go index d680c6cdac..1de5107e4a 100644 --- a/vendor/github.com/minio/minio-go/retry.go +++ b/vendor/github.com/minio/minio-go/retry.go @@ -33,8 +33,16 @@ const MaxJitter = 1.0 // NoJitter disables the use of jitter for randomizing the exponential backoff time const NoJitter = 0.0 -// newRetryTimer creates a timer with exponentially increasing delays -// until the maximum retry attempts are reached. +// DefaultRetryUnit - default unit multiplicative per retry. +// defaults to 1 second. +const DefaultRetryUnit = time.Second + +// DefaultRetryCap - Each retry attempt never waits no longer than +// this maximum time duration. +const DefaultRetryCap = time.Second * 30 + +// newRetryTimer creates a timer with exponentially increasing +// delays until the maximum retry attempts are reached. func (c Client) newRetryTimer(maxRetry int, unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int { attemptCh := make(chan int) diff --git a/vendor/github.com/minio/minio-go/s3-error.go b/vendor/github.com/minio/minio-go/s3-error.go new file mode 100644 index 0000000000..11b40a0f88 --- /dev/null +++ b/vendor/github.com/minio/minio-go/s3-error.go @@ -0,0 +1,60 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package minio + +// Non exhaustive list of AWS S3 standard error responses - +// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +var s3ErrorResponseMap = map[string]string{ + "AccessDenied": "Access Denied.", + "BadDigest": "The Content-Md5 you specified did not match what we received.", + "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.", + "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.", + "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.", + "InternalError": "We encountered an internal error, please try again.", + "InvalidAccessKeyID": "The access key ID you provided does not exist in our records.", + "InvalidBucketName": "The specified bucket is not valid.", + "InvalidDigest": "The Content-Md5 you specified is not valid.", + "InvalidRange": "The requested range is not satisfiable", + "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.", + "MissingContentLength": "You must provide the Content-Length HTTP header.", + "MissingContentMD5": "Missing required header for this request: Content-Md5.", + "MissingRequestBodyError": "Request body is empty.", + "NoSuchBucket": "The specified bucket does not exist", + "NoSuchBucketPolicy": "The bucket policy does not exist", + "NoSuchKey": "The specified key does not exist.", + "NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", + "NotImplemented": "A header you provided implies functionality that is not implemented", + "PreconditionFailed": "At least one of the pre-conditions you specified did not hold", + "RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.", + "SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. 
Check your key and signing method.", + "MethodNotAllowed": "The specified method is not allowed against this resource.", + "InvalidPart": "One or more of the specified parts could not be found.", + "InvalidPartOrder": "The list of parts was not in ascending order. The parts list must be specified in order by part number.", + "InvalidObjectState": "The operation is not valid for the current state of the object.", + "AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.", + "MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.", + "BucketNotEmpty": "The bucket you tried to delete is not empty", + "AllAccessDisabled": "All access to this bucket has been disabled.", + "MalformedPolicy": "Policy has invalid resource.", + "MissingFields": "Missing fields in request.", + "AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".", + "MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", + "BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.", + "InvalidDuration": "Duration provided in the request is invalid.", + "XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.", + // Add new API errors here. +} diff --git a/vendor/github.com/minio/minio-go/signature-type.go b/vendor/github.com/minio/minio-go/signature-type.go index cae74cd010..36e999a263 100644 --- a/vendor/github.com/minio/minio-go/signature-type.go +++ b/vendor/github.com/minio/minio-go/signature-type.go @@ -24,6 +24,7 @@ const ( Latest SignatureType = iota SignatureV4 SignatureV2 + SignatureV4Streaming ) // isV2 - is signature SignatureV2? 
@@ -35,3 +36,8 @@ func (s SignatureType) isV2() bool { func (s SignatureType) isV4() bool { return s == SignatureV4 || s == Latest } + +// isStreamingV4 - is signature SignatureV4Streaming? +func (s SignatureType) isStreamingV4() bool { + return s == SignatureV4Streaming +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 171a2ad8a9..cb6d8bfed6 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -50,22 +50,28 @@ "revisionTime": "2015-10-24T22:24:27-07:00" }, { - "checksumSHA1": "mDmRRka//itovrS4jtcpVdr0Weg=", + "checksumSHA1": "ymC5RhgF7MLQ7Gj1w0AcCQykQbY=", "path": "github.com/minio/minio-go", - "revision": "6d434a3827cdfa3fcf43bde6864fbdd2aad19089", - "revisionTime": "2017-04-08T08:09:03Z" + "revision": "5297a818b482fa329b3dc1a3926e3c4c6fb5d459", + "revisionTime": "2017-04-26T18:23:05Z" }, { - "checksumSHA1": "qTxOBp3GVxCC70ykb7Hxg6UgWwA=", + "checksumSHA1": "lsxCcRcNUDxhQyO999SOdvKzzfM=", + "path": "github.com/minio/minio-go/pkg/encrypt", + "revision": "5297a818b482fa329b3dc1a3926e3c4c6fb5d459", + "revisionTime": "2017-04-26T18:23:05Z" + }, + { + "checksumSHA1": "neH34/65OXeKHM/MlV8MbhcdFBc=", "path": "github.com/minio/minio-go/pkg/policy", - "revision": "583c261267bc1022bb3e046c7d01c49d3f56edaa", - "revisionTime": "2016-09-03T08:42:23Z" + "revision": "5297a818b482fa329b3dc1a3926e3c4c6fb5d459", + "revisionTime": "2017-04-26T18:23:05Z" }, { - "checksumSHA1": "m/6/na9lVtamkfmIdIOi5pdccgw=", + "checksumSHA1": "/5IXp1nGKqOfHn8Piiod3OCkG2U=", "path": "github.com/minio/minio-go/pkg/s3signer", - "revision": "532b920ff28900244a2ef7d07468003df36fe7c5", - "revisionTime": "2016-12-20T20:43:13Z" + "revision": "5297a818b482fa329b3dc1a3926e3c4c6fb5d459", + "revisionTime": "2017-04-26T18:23:05Z" }, { "checksumSHA1": "gRnCFKb4x83GBLVUZXoOjujd+U0=", @@ -74,10 +80,10 @@ "revisionTime": "2017-01-05T04:45:20Z" }, { - "checksumSHA1": "A8QOw1aWwc+RtjGozY0XeS5varo=", + "checksumSHA1": "maUy+dbN6VfTTnfErrAW2lLit1w=", "path": "github.com/minio/minio-go/pkg/set", - 
"revision": "9e734013294ab153b0bdbe182738bcddd46f1947", - "revisionTime": "2016-08-18T00:31:20Z" + "revision": "5297a818b482fa329b3dc1a3926e3c4c6fb5d459", + "revisionTime": "2017-04-26T18:23:05Z" }, { "checksumSHA1": "v1OEKlUTmnwiQ2RxIFfDhz77Uu0=",