diff --git a/README.md b/README.md index a51a802a1..4f7c2d0c0 100644 --- a/README.md +++ b/README.md @@ -82,7 +82,13 @@ OSS, Object Storage Service. Equal to well known Amazon [S3](http://aws.amazon.c - [.signatureUrl(name[, options])](#signatureurlname-options) - [.putACL*(name, acl[, options])](#putaclname-acl-options) - [.getACL*(name[, options])](#getaclname-options) + - [.initMultipartUpload*(name[, options])](#initmultipartuploadname-options) + - [.uploadPart*(name, uploadId, partNo, file, start, end[, options])](#uploadpartname-uploadid-partno-file-start-end-options) + - [.uploadPartCopy*(name, uploadId, partNo, range, sourceData[, options])](#uploadpartcopyname-uploadid-partno-range-sourcedata-options) + - [.completeMultipartUpload(name, uploadId, parts[, options])](#completemultipartuploadname-uploadid-parts-options) - [.multipartUpload*(name, file[, options])](#multipartuploadname-file-options) + - [.multipartUploadCopy*(name, sourceData[, options])](#multipartuploadcopyname-sourcedata-options) + - [.listParts*(name, uploadId[, query, options])](#listparts-name-uploadid-query-options) - [.listUploads*(query[, options])](#listuploadsquery-options) - [.abortMultipartUpload*(name, uploadId[, options])](#abortmultipartuploadname-uploadid-options) - [RTMP Operations](#rtmp-operations) @@ -792,12 +798,31 @@ parameters: - 'Content-Disposition' object name for download, e.g.: `Content-Disposition: somename` - 'Content-Encoding' object content encoding for download, e.g.: `Content-Encoding: gzip` - 'Expires' expires time (milliseconds) for download, e.g.: `Expires: 3600000` + - [x-oss-callback] The callback parameter is composed of a JSON string encoded in Base64,detail [see](https://www.alibabacloud.com/help/doc-detail/31989.htm)
+ e.g.: + ```json + { + "callbackUrl":"121.101.166.30/test.php", //Required + "callbackHost":"oss-cn-hangzhou.aliyuncs.com", //Optional + "callbackBody":"{\"mimeType\":${mimeType},\"size\":${size}}", //Required + "callbackBodyType":"application/json" //Optional + } + ``` + - [x-oss-callback-var] Custom parameters are a map of key-values. You can configure the required parameters to the map. When initiating a POST callback request, the OSS puts these parameters and the system parameters described in the preceding section in the body of the POST request, so that these parameters can be easily obtained by the callback recipient.detail [see](https://www.alibabacloud.com/help/doc-detail/31989.htm) Custom parameters
+ e.g.: need to use Base64 to encode + ```json + { + "x:var1":"value1", + "x:var2":"value2" + } + ``` Success will return the object information. object: - name {String} object name +- data {Object} callback server response data, sdk use JSON.parse() return - res {Object} response info, including - status {Number} response status - headers {Object} response headers @@ -1524,9 +1549,235 @@ var result = yield store.getACL('ossdemo.txt'); console.log(result.acl); ``` -### .multipartUpload*(name, file[, options) +### .initMultipartUpload(name[, options]) +Before transmitting data in the Multipart Upload mode, +you must call the Initiate Multipart Upload interface to notify the OSS to initiate a Multipart Upload event. +The Initiate Multipart Upload interface returns a globally unique Upload ID created by the OSS server to identify this Multipart Upload event. -Upload file with [OSS multipart][oss-multipart]. +parameters: + +- name {String} object name +- [options] {Object} optional parameters + - [timeout] {Number} the operation timeout + - [mime] Mime file type e.g.: application/octet-stream + - [meta] {Object} user meta, will send with `x-oss-meta-` prefix string + - [headers] {Object} extra headers, detail see [RFC 2616](http://www.w3.org/Protocols/rfc2616/rfc2616.html) + - 'Cache-Control' cache control for download, e.g.: `Cache-Control: public, no-cache` + - 'Content-Disposition' object name for download, e.g.: `Content-Disposition: somename` + - 'Content-Encoding' object content encoding for download, e.g.: `Content-Encoding: gzip` + - 'Expires' expires time (milliseconds) for download, e.g.: `Expires: 3600000` + - [x-oss-server-side-encryption] + Specify the server-side encryption algorithm used to upload each part of this object,Type: string, Valid value: AES256 `x-oss-server-side-encryption: AES256`
+ if use in browser you should be set cors expose header x-oss-server-side-encryption + +Success will return: + +- res {Object} response info, including + - status {Number} response status + - headers {Object} response headers + - [x-oss-server-side-encryption] if set request header x-oss-server-side-encryption, will return + - size {Number} response size + - rt {Number} request total use time (ms) +- bucket {String} bucket name +- name {String} object name store on OSS +- uploadId {String} upload id, use for uploadPart, completeMultipart + +example: + +```js + var result = yield store.initMultipartUpload('object'); + console.log(result); +``` + +### .uploadPart(name, uploadId, partNo, file, start, end[, options]) +After initiating a Multipart Upload event, you can upload data in parts based on the specified object name and Upload ID. + +parameters: + +- name {String} object name +- uploadId {String} get by initMultipartUpload api +- partNo {Number} range is 1-10000, If this range is exceeded, OSS returns the InvalidArgument's error code. +- file {File|String} is File or FileName, the whole file
+ Multipart Upload requires that the size of any Part other than the last Part is at least 100KB.
+ In Node you can use File or FileName, but in browser you only can use File. +- start {Number} part start bytes e.g: 102400 +- end {Number} part end bytes e.g: 204800 +- [options] {Object} optional parameters + - [timeout] {Number} the operation timeout + +Success will return: + +- res {Object} response info, including + - status {Number} response status + - headers {Object} response headers + - size {Number} response size + - rt {Number} request total use time (ms) +- name {String} object name store on OSS +- etag {String} object etag contains ", e.g.: "5B3C1A2E053D763E1B002CC607C5A0FE" + +example: + +```js + var name = 'object'; + var result = yield store.initMultipartUpload(name); + var uploadId = result.uploadId; + var file; //the data you want to upload, is a File or FileName(only in node) + //if file part is 10 + var partSize = 100 * 1024; + var fileSize = 10 * partSize;//you need to calculate + var dones = []; + for (var i = 1; i <= 10; i++) { + var start = partSize * (i -1); + var end = Math.min(start + partSize, fileSize); + var part = yield store.uploadPart(name, uploadId, i, file, start, end); + dones.push({ + number: i, + etag: part.etag + }); + console.log(part); + } + + //end need to call completeMultipartUpload api +``` + +### .uploadPartCopy(name, uploadId, partNo, range, sourceData[, options]) +Using Upload Part Copy, you can copy data from an existing object and upload a part of the data. +When copying a file larger than 1 GB, you must use the Upload Part Copy method. If you want to copy a file smaller than 1 GB, see Copy Object. + +parameters: + +- name {String} object name +- uploadId {String} get by initMultipartUpload api +- partNo {Number} range is 1-10000, If this range is exceeded, OSS returns the InvalidArgument's error code. 
+- range {String} Multipart Upload requires that the size of any Part other than the last Part is greater than 100KB, range value like `0-102400` +- sourceData {Object} + - sourceKey {String} the source object name + - sourceBucketName {String} the source bucket name +- [options] {Object} optional parameters + - [timeout] {Number} the operation timeout + - [headers] {Object} The following request header is used for the source objects specified by x-oss-copy-source. + - [x-oss-copy-source-if-match] default none
+ If the ETAG value of the source object is equal to the ETAG value provided by the user, the system performs the Copy Object operation; otherwise, the system returns the 412 Precondition Failed message. + - [x-oss-copy-source-if-none-match] default none
+ If the ETAG value of the source object is not equal to the ETAG value provided by the user, the system performs the Copy Object operation; otherwise, the system returns the 304 Not Modified message. + - [x-oss-copy-source-if-unmodified-since] default none
+ If the time specified by the received parameter is the same as or later than the modification time of the file, the system transfers the file normally, and returns 200 OK; otherwise, the system returns 412 Precondition Failed. + - [x-oss-copy-source-if-modified-since] default none
+ If the source object has been modified since the time specified by the user, the system performs the Copy Object operation; otherwise, the system returns the 412 Precondition Failed message. + +Success will return: + +- res {Object} response info, including + - status {Number} response status + - headers {Object} response headers + - size {Number} response size + - rt {Number} request total use time (ms) +- name {String} object name store on OSS +- etag {String} object etag contains ", e.g.: "5B3C1A2E053D763E1B002CC607C5A0FE" + +example: + +```js + var name = 'object'; + var result = yield store.initMultipartUpload(name); + + var partSize = 100 * 1024;//100kb + //if file part is 10 + for (var i = 1; i <= 10; i++) { + var start = partSize * (i -1); + var end = Math.min(start + partSize, fileSize); + var range = start + '-' + (end - 1); + var part = yield store.uploadPartCopy(name, result.uploadId, i, range, { + sourceKey: 'sourceKey', + sourceBucketName: 'sourceBucketName' + }); + console.log(part); + } + + //end need complete api +``` + +### .completeMultipartUpload(name, uploadId, parts[, options]) +After uploading all data parts, you must call the Complete Multipart Upload API to complete Multipart Upload for the entire file. + +parameters: + +- name {String} object name +- uploadId {String} get by initMultipartUpload api +- parts {Array} more part {Object} from uploadPartCopy, , each in the structure: + - number {Number} partNo + - etag {String} object etag contains ", e.g.: "5B3C1A2E053D763E1B002CC607C5A0FE" +- [options] {Object} optional parameters + - [timeout] {Number} the operation timeout + - [headers] {Object} extra headers, detail see [RFC 2616](http://www.w3.org/Protocols/rfc2616/rfc2616.html) + - [x-oss-callback] The callback parameter is composed of a JSON string encoded in Base64,detail [see](https://www.alibabacloud.com/help/doc-detail/31989.htm)
+ e.g.: + ```json + { + "callbackUrl":"121.101.166.30/test.php", //Required + "callbackHost":"oss-cn-hangzhou.aliyuncs.com", //Optional + "callbackBody":"{\"mimeType\":${mimeType},\"size\":${size}}", //Required + "callbackBodyType":"application/json" //Optional + } + ``` + - [x-oss-callback-var] Custom parameters are a map of key-values. You can configure the required parameters to the map. When initiating a POST callback request, the OSS puts these parameters and the system parameters described in the preceding section in the body of the POST request, so that these parameters can be easily obtained by the callback recipient.detail [see](https://www.alibabacloud.com/help/doc-detail/31989.htm) Custom parameters
+ e.g.: need to use Base64 to encode + ```json + { + "x:var1":"value1", + "x:var2":"value2" + } + ``` + + +Success will return: + +- res {Object} response info, including + - status {Number} response status + - headers {Object} response headers + - size {Number} response size + - rt {Number} request total use time (ms) +- bucket {String} bucket name +- name {String} object name store on OSS +- etag {String} object etag contains ", e.g.: "5B3C1A2E053D763E1B002CC607C5A0FE" +- data {Object} callback server response data , sdk use JSON.parse() return + +example: + +```js + + //init multipart + var name = 'object'; + var result = yield store.initMultipartUpload(name); + + //upload part + var file; //the data you want to upload, this example size is 10 * 100 * 1024 + var fileSize;//you need to calculate + var partSize = 100 * 1024;//100kb + var done = []; + //if file part is 10 + for (var i = 1; i <= 10; i++) { + var start = partSize * (i -1); + var end = Math.min(start + partSize, fileSize); + var data = file.slice(start, end); + var part = yield store.uploadPart(name, result.uploadId, i, data); + console.log(part); + done.push({ + number: i, + etag: part.res.headers.etag + }); + } + + //complete + var completeData = yield store.completeMultipartUpload(name, result.uploadId, done); + console.log(completeData); +``` + + +### .multipartUpload*(name, file[, options]) + +Upload file with [OSS multipart][oss-multipart].
+this function contains initMultipartUpload, uploadPart, completeMultipartUpload. parameters: @@ -1549,6 +1800,24 @@ parameters: - 'Content-Encoding' object content encoding for download, e.g.: `Content-Encoding: gzip` - 'Expires' expires time (milliseconds) for download, e.g.: `Expires: 3600000` - **NOTE**: Some headers are [disabled in browser][disabled-browser-headers] + - [x-oss-callback] The callback parameter is composed of a JSON string encoded in Base64,detail [see](https://www.alibabacloud.com/help/doc-detail/31989.htm)
+ e.g.: + ```json + { + "callbackUrl":"121.101.166.30/test.php", //Required + "callbackHost":"oss-cn-hangzhou.aliyuncs.com", //Optional + "callbackBody":"{\"mimeType\":${mimeType},\"size\":${size}}", //Required + "callbackBodyType":"application/json" //Optional + } + ``` + - [x-oss-callback-var] Custom parameters are a map of key-values. You can configure the required parameters to the map. When initiating a POST callback request, the OSS puts these parameters and the system parameters described in the preceding section in the body of the POST request, so that these parameters can be easily obtained by the callback recipient.detail [see](https://www.alibabacloud.com/help/doc-detail/31989.htm) Custom parameters
+ e.g.: need to use Base64 to encode + ```json + { + "x:var1":"value1", + "x:var2":"value2" + } + ``` - [timeout] {Number} Milliseconds before a request is considered to be timed out Success will return: @@ -1561,6 +1830,7 @@ Success will return: - bucket {String} bucket name - name name {String} object name store on OSS - etag {String} object etag contains ", e.g.: "5B3C1A2E053D763E1B002CC607C5A0FE" +- data {Object} callback server response data, sdk use JSON.parse() return example: @@ -1650,6 +1920,172 @@ store.cancel(); ``` +### .multipartUploadCopy*(name, sourceData[, options]) + +Copy file with [OSS multipart][oss-multipart].
+this function contains head, initMultipartUpload, uploadPartCopy, completeMultipartUpload.
+When copying a file larger than 1 GB, you should use the Upload Part Copy method. If you want to copy a file smaller than 1 GB, see Copy Object. + +parameters: + +- name {String} object name +- file {String|File} file path or HTML5 Web File +- [options] {Object} optional args + - [timeout] {Number} Milliseconds before a request is considered to be timed out + - [parallel] {Number} the number of parts to be uploaded in parallel + - [partSize] {Number} the suggested size for each part + - [progress] {Function} is thunk or generator, the progress callback called after each + successful upload of one part, it will be given three parameters: + (percentage {Number}, checkpoint {Object}, res {Object}) + - [checkpoint] {Object} the checkpoint to resume upload, if this is + provided, it will continue the upload from where interrupted, + otherwise a new multipart upload will be created. + - [headers] {Object} extra headers, detail see [RFC 2616](http://www.w3.org/Protocols/rfc2616/rfc2616.html) + - 'Cache-Control' cache control for download, e.g.: `Cache-Control: public, no-cache` + - 'Content-Disposition' object name for download, e.g.: `Content-Disposition: somename` + - 'Content-Encoding' object content encoding for download, e.g.: `Content-Encoding: gzip` + - 'Expires' expires time (milliseconds) for download, e.g.: `Expires: 3600000` + - **NOTE**: Some headers are [disabled in browser][disabled-browser-headers] + - [copyheaders] {Object} only uploadPartCopy api used, detail [see](https://www.alibabacloud.com/help/doc-detail/31994.htm) + - [x-oss-copy-source-if-match] only uploadPartCopy api used, default none
+ If the ETAG value of the source object is equal to the ETAG value provided by the user, the system performs the Copy Object operation; otherwise, the system returns the 412 Precondition Failed message. + - [x-oss-copy-source-if-none-match] only uploadPartCopy api used, default none
+ If the ETAG value of the source object is not equal to the ETAG value provided by the user, the system performs the Copy Object operation; otherwise, the system returns the 304 Not Modified message. + - [x-oss-copy-source-if-unmodified-since] only uploadPartCopy api used, default none
+ If the time specified by the received parameter is the same as or later than the modification time of the file, the system transfers the file normally, and returns 200 OK; otherwise, the system returns 412 Precondition Failed. + - [x-oss-copy-source-if-modified-since] only uploadPartCopy api used, default none
+ If the source object has been modified since the time specified by the user, the system performs the Copy Object operation; otherwise, the system returns the 412 Precondition Failed message. + +Success will return: + +- res {Object} response info, including + - status {Number} response status + - headers {Object} response headers + - size {Number} response size + - rt {Number} request total use time (ms) +- bucket {String} bucket name +- name name {String} object name store on OSS +- etag {String} object etag contains ", e.g.: "5B3C1A2E053D763E1B002CC607C5A0FE" + +example: + +- Copy using multipart + +```js +var result = yield store.multipartUploadCopy('object', { + sourceKey: 'sourceKey', + sourceBucketName: 'sourceBucketName' +}); +console.log(result); + +var result = yield store.multipartUploadCopy('object', { + sourceKey: 'sourceKey', + sourceBucketName: 'sourceBucketName' +}, { + parallel: 4, + partSize: 1024 * 1024, + progress: function* (p, cpt, res) { + console.log(p); + console.log(cpt); + console.log(res.headers['x-oss-request-id']); + } +}); + +console.log(result); + +var result = yield store.multipartUploadCopy('object', { + sourceKey: 'sourceKey', + sourceBucketName: 'sourceBucketName' +}, { + checkpoint: savedCpt, + progress: function* (p, cpt, res) { + console.log(p); + console.log(cpt); + console.log(res.headers['x-oss-request-id']); + } +}); + +console.log(result); + +``` +- multipartUploadCopy with cancel + +```js + +//start upload +try { + var result = yield store.multipartUploadCopy('object', { + sourceKey: 'sourceKey', + sourceBucketName: 'sourceBucketName' + }, { + checkpoint: savedCpt, + progress: function* (p, cpt, res) { + console.log(p); + console.log(cpt); + console.log(res.headers['x-oss-request-id']); + } + }); +} catch (err) { + //if cancel will catch cancel event + if (store.isCancel()) { + //do something + } +} + +//the other event to cancel, for example: click event +//to cancel upload must use the same client instance 
+store.cancel(); + +``` + +### .listParts*(name, uploadId[, query, options]) + +The ListParts command can be used to list all successfully uploaded parts mapped to a specific upload ID, i.e.: those not completed and not +aborted. + +parameters: + +- name {String} object key +- uploadId {String} upload ID from initMultipartUpload api +- [query] {Object} query parameters + - [max-parts] {Number} The maximum part number in the response of the OSS. default value: 1000. + - [part-number-marker] {Number} Starting position of a specific list. A part is listed only when the part number is greater than the value of this parameter. + - [encoding-type] {String} Specify the encoding of the returned content and the encoding type. Optional value: url +- [options] {Object} optional args + - [timeout] {Number} the operation timeout + +Success will return: + +- res {Object} response info, including + - status {Number} response status + - headers {Object} response headers + - size {Number} response size + - rt {Number} request total use time (ms) +- uploadId {String} upload ID +- bucket {String} Specify the bucket name. +- name {String} object name +- PartNumberMarker {Number} Starting position of the part numbers in the listing result. +- nextPartNumberMarker {Number} If not all results are returned this time, the response request includes the NextPartNumberMarker element to indicate the value of PartNumberMarker in the next request. +- maxParts {Number} upload ID +- isTruncated {Boolean} Whether the returned result list for List Parts is truncated. The “true” indicates that not all results are returned; “false” indicates that all results are returned. +- parts {Array} The container that saves part information, each in the structure: + - PartNumber {Number} Part number. + - LastModified {Date} Time when a part is uploaded. + - ETag {String} ETag value in the content of the uploaded part. + - Size {Number} Size of the uploaded part. 
+ +example: + +- List uploaded part + +```js + +var result = yield store.listParts('objcet', 'uploadId', { + 'max-parts': 1000 +}); +console.log(result); +``` + ### .listUploads*(query[, options]) List on-going multipart uploads, i.e.: those not completed and not diff --git a/lib/browser/bucket.js b/lib/browser/bucket.js index a016fe161..037bd77f8 100644 --- a/lib/browser/bucket.js +++ b/lib/browser/bucket.js @@ -56,6 +56,15 @@ proto.useBucket = function useBucket(name, region) { return this; }; +proto.setBucket = function useBucket(name) { + this.options.bucket = name; + return this; +}; + +proto.getBucket = function getBucket() { + return this.options.bucket; +}; + proto.putBucket = function* putBucket(name, region, options) { var params = this._bucketRequestParams('PUT', name, '', options); if (region) { diff --git a/lib/browser/client.js b/lib/browser/client.js index e9e7350ff..40d3ae044 100644 --- a/lib/browser/client.js +++ b/lib/browser/client.js @@ -109,10 +109,12 @@ merge(proto, require('./object')); // * Bucket operations // */ // merge(proto, require('./bucket')); +//multipart upload +merge(proto, require('./managed_upload')); /** * Multipart operations */ -merge(proto, require('./multipart')); +merge(proto, require('../common/multipart')); /** * Common module diff --git a/lib/browser/multipart.js b/lib/browser/managed_upload.js similarity index 62% rename from lib/browser/multipart.js rename to lib/browser/managed_upload.js index 5721efe81..845b52bd3 100644 --- a/lib/browser/multipart.js +++ b/lib/browser/managed_upload.js @@ -1,10 +1,7 @@ 'use strict'; // var debug = require('debug')('ali-oss:multipart'); -// var fs = require('fs'); var is = require('is-type-of'); -// var destroy = require('destroy'); -// var eoe = require('end-or-error'); var util = require('util'); var path = require('path'); var mime = require('mime'); @@ -61,7 +58,7 @@ proto.multipartUpload = function* multipartUpload(name, file, options) { throw new Error('partSize must not be 
smaller than ' + minPartSize); } - var result = yield this._initMultipartUpload(name, options); + var result = yield this.initMultipartUpload(name, options); var uploadId = result.uploadId; var partSize = this._getPartSize(fileSize, options.partSize); @@ -111,7 +108,7 @@ proto._resumeMultipart = function* _resumeMultipart(checkpoint, options) { size: pi.end - pi.start }; - var result = yield self._uploadPart(name, uploadId, partNo, data); + var result = yield self._uploadPart(name, uploadId, partNo, data, options); doneParts.push({ number: partNo, etag: result.res.headers.etag @@ -163,169 +160,10 @@ proto._resumeMultipart = function* _resumeMultipart(checkpoint, options) { throw err; } } - return yield this._completeMultipartUpload(name, uploadId, doneParts, options); + return yield this.completeMultipartUpload(name, uploadId, doneParts, options); }; -/** - * List the on-going multipart uploads - * https://help.aliyun.com/document_detail/31997.html - * @param {Object} options - * @return {Array} the multipart uploads - */ -proto.listUploads = function* listUploads(query, options) { - options = options || {}; - options.subres = 'uploads'; - var params = this._objectRequestParams('GET', '', options) - params.query = query; - params.xmlResponse = true; - params.successStatuses = [200]; - - var result = yield this.request(params); - var uploads = result.data.Upload || []; - if (!Array.isArray(uploads)) { - uploads = [uploads]; - } - uploads = uploads.map(function (up) { - return { - name: up.Key, - uploadId: up.UploadId, - initiated: up.Initiated - }; - }); - - return { - res: result.res, - uploads: uploads, - bucket: result.data.Bucket, - nextKeyMarker: result.data.NextKeyMarker, - nextUploadIdMarker: result.data.NextUploadIdMarker, - isTruncated: result.data.IsTruncated === 'true' - }; -}; - -/** - * Abort a multipart upload transaction - * @param {String} name the object name - * @param {String} uploadId the upload id - * @param {Object} options - */ 
-proto.abortMultipartUpload = function* abortMultipartUpload(name, uploadId, options) { - this.cancel(); - options = options || {}; - options.subres = {uploadId: uploadId}; - var params = this._objectRequestParams('DELETE', name, options); - params.successStatuses = [204]; - - var result = yield this.request(params); - return { - res: result.res - }; -}; - -/** - * Initiate a multipart upload transaction - * @param {String} name the object name - * @param {Object} options - * @return {String} upload id - */ -proto._initMultipartUpload = function* _initMultipartUpload(name, options) { - options = options || {}; - options.headers = options.headers || {}; - this._convertMetaToHeaders(options.meta, options.headers); - - options.subres = 'uploads'; - var params = this._objectRequestParams('POST', name, options); - params.mime = options.mime; - params.xmlResponse = true; - params.successStatuses = [200]; - - var result = yield this.request(params); - - return { - res: result.res, - bucket: result.data.Bucket, - name: result.data.Key, - uploadId: result.data.UploadId - }; -}; -/** - * Upload a part in a multipart upload transaction - * @param {String} name the object name - * @param {String} uploadId the upload id - * @param {Integer} partNo the part number - * @param {Object} data the body data - * @param {Object} options - */ -proto._uploadPart = function* _uploadPart(name, uploadId, partNo, data, options) { - options = options || {}; - options.headers = { - 'Content-Length': data.size - }; - - options.subres = { - partNumber: partNo, - uploadId: uploadId - }; - var params = this._objectRequestParams('PUT', name, options); - params.mime = options.mime; - params.stream = data.stream; - params.successStatuses = [200]; - - var result = yield this.request(params); - - data.stream = null; - params.stream = null; - return { - name: name, - etag: result.res.headers.etag, - res: result.res - }; -}; - -/** - * Complete a multipart upload transaction - * @param {String} name the 
object name - * @param {String} uploadId the upload id - * @param {Array} parts the uploaded parts - * @param {Object} options - */ -proto._completeMultipartUpload = function* _completeMultipartUpload(name, uploadId, parts, options) { - parts.sort((a, b) => a.number - b.number); - var xml = '\n\n'; - for (var i = 0; i < parts.length; i++) { - var p = parts[i]; - xml += '\n'; - xml += '' + p.number + '\n'; - xml += '' + p.etag + '\n'; - xml += '\n'; - } - xml += ''; - - options = options || {}; - options.subres = {uploadId: uploadId}; - var params = this._objectRequestParams('POST', name, options); - params.mime = 'xml'; - params.content = xml; - if (!(options.headers && options.headers['x-oss-callback'])) { - params.xmlResponse = true; - } - params.successStatuses = [200]; - var result = yield this.request(params); - - var ret = { - res: result.res, - bucket: params.bucket, - name: name, - etag: result.res.headers['etag'] - }; - - if (options.headers && options.headers['x-oss-callback']) { - ret.data = JSON.parse(result.data.toString()); - } - - return ret; -}; is.file = function (file) { return typeof(File) !== 'undefined' && file instanceof File; diff --git a/lib/bucket.js b/lib/bucket.js index cdcb9df31..680e5bc46 100644 --- a/lib/bucket.js +++ b/lib/bucket.js @@ -55,6 +55,15 @@ proto.useBucket = function useBucket(name, region) { return this; }; +proto.setBucket = function useBucket(name) { + this.options.bucket = name; + return this; +}; + +proto.getBucket = function getBucket() { + return this.options.bucket; +}; + proto.putBucket = function* putBucket(name, region, options) { var params = this._bucketRequestParams('PUT', name, '', options); if (region) { diff --git a/lib/client.js b/lib/client.js index a7d2d789e..75d5bb59b 100644 --- a/lib/client.js +++ b/lib/client.js @@ -103,15 +103,22 @@ merge(proto, require('./object')); * Bucket operations */ merge(proto, require('./bucket')); -/** - * Multipart operations - */ -merge(proto, require('./multipart')); 
+//multipart upload +merge(proto, require('./managed_upload')); /** * RTMP operations */ merge(proto, require('./rtmp')); +/** + * common multipart-copy support node and browser + */ +merge(proto, require('./common/multipart-copy.js')); +merge(proto, require('./common/thunkpool.js')); +/** + * Multipart operations + */ +merge(proto, require('./common/multipart')); /** * ImageClient class */ diff --git a/lib/common/multipart-copy.js b/lib/common/multipart-copy.js new file mode 100644 index 000000000..5b0edc71a --- /dev/null +++ b/lib/common/multipart-copy.js @@ -0,0 +1,216 @@ +'use strict'; + +var debug = require('debug')('ali-oss:multipart-copy'); +var copy = require('copy-to'); +var proto = exports; + + +/** + * Upload a part copy in a multipart from the source bucket/object, used with initMultipartUpload and completeMultipartUpload. + * @param {String} name copy object name + * @param {String} uploadId the upload id + * @param {Number} partNo the part number + * @param {String} range like 0-102400 part size need to copy + * @param {Object} sourceData + * {String} sourceData.sourceKey the source object name + * {String} sourceData.sourceBucketName the source bucket name + * @param {Object} options + */ +proto.uploadPartCopy = function* uploadPartCopy(name, uploadId, partNo, range, sourceData, options) { + options = options || {}; + options.headers = options.headers || {}; + var copySource = '/' + sourceData.sourceBucketName + '/' + encodeURIComponent(sourceData.sourceKey); + options.headers["x-oss-copy-source"] = copySource; + if (range) { + options.headers["x-oss-copy-source-range"] = 'bytes=' + range; + } + + options.subres = { + partNumber: partNo, + uploadId: uploadId + }; + var params = this._objectRequestParams('PUT', name, options); + params.mime = options.mime; + params.successStatuses = [200]; + + var result = yield this.request(params); + + return { + name: name, + etag: result.res.headers.etag, + res: result.res + }; +}; + +/** + * @param {String} name 
copy object name + * @param {Object} sourceData + * {String} sourceData.sourceKey the source object name + * {String} sourceData.sourceBucketName the source bucket name + * {Number} sourceData.startOffset data copy start byte offset, e.g: 0 + * {Number} sourceData.endOffset data copy end byte offset, e.g: 102400 + * @param {Object} options + * {Number} options.partSize + */ +proto.multipartUploadCopy = function* multipartUploadCopy(name, sourceData, options) { + this.resetCancelFlag(); + options = options || {}; + var objectMeta = yield this._getObjectMeta(sourceData.sourceBucketName, sourceData.sourceKey, {}); + var fileSize = objectMeta.res.headers['content-length']; + sourceData.startOffset = sourceData.startOffset || 0; + sourceData.endOffset = sourceData.endOffset || fileSize; + + if (options.checkpoint && options.checkpoint.uploadId) { + return yield this._resumeMultipartCopy(options.checkpoint, sourceData, options); + } + + var minPartSize = 100 * 1024; + + var copySize = sourceData.endOffset - sourceData.startOffset; + if (copySize < minPartSize) { + throw new Error('copySize must not be smaller than ' + minPartSize); + } + + if (options.partSize && options.partSize < minPartSize) { + throw new Error('partSize must not be smaller than ' + minPartSize); + } + + var result = yield this.initMultipartUpload(name, options); + var uploadId = result.uploadId; + var partSize = this._getPartSize(copySize, options.partSize); + + var checkpoint = { + name: name, + copySize: copySize, + partSize: partSize, + uploadId: uploadId, + doneParts: [] + }; + + if (options && options.progress) { + yield options.progress(0, checkpoint, result.res); + } + + return yield this._resumeMultipartCopy(checkpoint, sourceData, options); +}; + +/* + * Resume multipart copy from checkpoint. The checkpoint will be + * updated after each successful part copy. 
+ * @param {Object} checkpoint the checkpoint + * @param {Object} options + */ +proto._resumeMultipartCopy = function* _resumeMultipartCopy(checkpoint, sourceData, options) { + if (this.isCancel()) { + throw this._makeCancelEvent(); + } + var copySize = checkpoint.copySize; + var partSize = checkpoint.partSize; + var uploadId = checkpoint.uploadId; + var doneParts = checkpoint.doneParts; + var name = checkpoint.name; + + var partOffs = this._divideMultipartCopyParts(copySize, partSize, sourceData.startOffset); + var numParts = partOffs.length; + + var uploadPartJob = function* (self, partNo, sourceData) { + if (!self.isCancel()) { + try { + var pi = partOffs[partNo - 1]; + var range = pi.start + '-' + (pi.end - 1); + + if (options.copyheaders) { + copy(options.copyheaders).to(options.headers); + } + + var result = yield self.uploadPartCopy(name, uploadId, partNo, range, sourceData, options); + + if (!self.isCancel()) { + debug('content-range ' + result.res.headers['content-range']); + doneParts.push({ + number: partNo, + etag: result.res.headers.etag + }); + checkpoint.doneParts = doneParts; + + if (options && options.progress) { + yield options.progress(doneParts.length / numParts, checkpoint, result.res); + } + } + + } catch (err) { + err.partNum = partNo; + throw err; + } + } + }; + + var all = Array.from(new Array(numParts), (x, i) => i + 1); + var done = doneParts.map(p => p.number); + var todo = all.filter(p => done.indexOf(p) < 0); + var defaultParallel = 5; + var parallel = options.parallel || defaultParallel; + + if (this.checkBrowserAndVersion('Internet Explorer', '10') || parallel === 1) { + for (var i = 0; i < todo.length; i++) { + if (this.isCancel()) { + throw this._makeCancelEvent(); + } + yield uploadPartJob(this, todo[i], sourceData); + } + } else { + // upload in parallel + var jobs = []; + for (var i = 0; i < todo.length; i++) { + jobs.push(uploadPartJob(this, todo[i], sourceData)); + } + + // start uploads jobs + var errors = yield 
this._thunkPool(jobs, parallel); + + if (this.isCancel()) { + jobs = null; + throw this._makeCancelEvent(); + } + + // check errors after all jobs are completed + if (errors && errors.length > 0) { + var err = errors[0]; + err.message = 'Failed to copy some parts with error: ' + err.toString() + " part_num: "+ err.partNum; + throw err; + } + } + + return yield this.completeMultipartUpload(name, uploadId, doneParts, options); +}; + +proto._divideMultipartCopyParts = function _divideMultipartCopyParts(fileSize, partSize, startOffset) { + var numParts = Math.ceil(fileSize / partSize); + + var partOffs = []; + for (var i = 0; i < numParts; i++) { + var start = partSize * i + startOffset; + var end = Math.min(start + partSize, fileSize + startOffset); + + partOffs.push({ + start: start, + end: end + }); + } + + return partOffs; +}; + +/** + * Get Object Meta + * @param {String} bucket bucket name + * @param {String} name object name + * @param {Object} options + */ +proto._getObjectMeta = function* _getObjectMeta(bucket, name, options) { + var currentBucket = this.getBucket(); + this.setBucket(bucket); + var data = yield this.head(name, options); + this.setBucket(currentBucket); + return data; +}; \ No newline at end of file diff --git a/lib/common/multipart.js b/lib/common/multipart.js new file mode 100644 index 000000000..8524edc97 --- /dev/null +++ b/lib/common/multipart.js @@ -0,0 +1,222 @@ +'use strict'; + + +var proto = exports; + + +/** + * List the on-going multipart uploads + * https://help.aliyun.com/document_detail/31997.html + * @param {Object} options + * @return {Array} the multipart uploads + */ +proto.listUploads = function* listUploads(query, options) { + options = options || {}; + options.subres = 'uploads'; + var params = this._objectRequestParams('GET', '', options) + params.query = query; + params.xmlResponse = true; + params.successStatuses = [200]; + + var result = yield this.request(params); + var uploads = result.data.Upload || []; + if 
(!Array.isArray(uploads)) { + uploads = [uploads]; + } + uploads = uploads.map(function (up) { + return { + name: up.Key, + uploadId: up.UploadId, + initiated: up.Initiated + }; + }); + + return { + res: result.res, + uploads: uploads, + bucket: result.data.Bucket, + nextKeyMarker: result.data.NextKeyMarker, + nextUploadIdMarker: result.data.NextUploadIdMarker, + isTruncated: result.data.IsTruncated === 'true' + }; +}; + +/** + * List the done uploadPart parts + * @param {String} name object name + * @param {String} uploadId multipart upload id + * @param {Object} query + * {Number} query.max-parts The maximum part number in the response of the OSS. Default value: 1000 + * {Number} query.part-number-marker Starting position of a specific list. + * {String} query.encoding-type Specify the encoding of the returned content and the encoding type. + * @param {Object} options + * @return {Object} result + */ +proto.listParts = function* listParts(name, uploadId, query, options) { + options = options || {}; + options.subres = { + uploadId: uploadId + }; + var params = this._objectRequestParams('GET', name, options); + params.query = query; + params.xmlResponse = true; + params.successStatuses = [200]; + + var result = yield this.request(params); + + return { + res: result.res, + uploadId: result.data.UploadId, + bucket: result.data.Bucket, + name: result.data.Key, + partNumberMarker: result.data.PartNumberMarker, + nextPartNumberMarker: result.data.NextPartNumberMarker, + maxParts: result.data.MaxParts, + isTruncated: result.data.IsTruncated, + parts: result.data.Part || [] + }; +}; + +/** + * Abort a multipart upload transaction + * @param {String} name the object name + * @param {String} uploadId the upload id + * @param {Object} options + */ +proto.abortMultipartUpload = function* abortMultipartUpload(name, uploadId, options) { + this.cancel(); + options = options || {}; + options.subres = {uploadId: uploadId}; + var params = this._objectRequestParams('DELETE', name, 
options); + params.successStatuses = [204]; + + var result = yield this.request(params); + return { + res: result.res + }; +}; + +/** + * Initiate a multipart upload transaction + * @param {String} name the object name + * @param {Object} options + * @return {String} upload id + */ +proto.initMultipartUpload = function* initMultipartUpload(name, options) { + options = options || {}; + options.headers = options.headers || {}; + this._convertMetaToHeaders(options.meta, options.headers); + + options.subres = 'uploads'; + var params = this._objectRequestParams('POST', name, options); + params.mime = options.mime; + params.xmlResponse = true; + params.successStatuses = [200]; + + var result = yield this.request(params); + + return { + res: result.res, + bucket: result.data.Bucket, + name: result.data.Key, + uploadId: result.data.UploadId + }; +}; + +/** + * Upload a part in a multipart upload transaction + * @param {String} name the object name + * @param {String} uploadId the upload id + * @param {Integer} partNo the part number + * @param {File} file upload File, whole File + * @param {Integer} start part start bytes e.g: 102400 + * @param {Integer} end part end bytes e.g: 204800 + * @param {Object} options + */ +proto.uploadPart = function* uploadPart(name, uploadId, partNo, file, start, end, options) { + var data = { + stream: this._createStream(file, start, end), + size: end - start + }; + return yield this._uploadPart(name, uploadId, partNo, data, options); +}; + +/** + * Complete a multipart upload transaction + * @param {String} name the object name + * @param {String} uploadId the upload id + * @param {Array} parts the uploaded parts, each in the structure: + * {Integer} number partNo + * {String} etag part etag uploadPartCopy result.res.header.etag + * @param {Object} options + */ +proto.completeMultipartUpload = function* completeMultipartUpload(name, uploadId, parts, options) { + parts.sort((a, b) => a.number - b.number); + var xml = '\n\n'; + for (var i = 
0; i < parts.length; i++) { + var p = parts[i]; + xml += '\n'; + xml += '' + p.number + '\n'; + xml += '' + p.etag + '\n'; + xml += '\n'; + } + xml += ''; + + options = options || {}; + options.subres = {uploadId: uploadId}; + var params = this._objectRequestParams('POST', name, options); + params.mime = 'xml'; + params.content = xml; + if (!(options.headers && options.headers['x-oss-callback'])) { + params.xmlResponse = true; + } + params.successStatuses = [200]; + var result = yield this.request(params); + + var ret = { + res: result.res, + bucket: params.bucket, + name: name, + etag: result.res.headers['etag'] + }; + + if (options.headers && options.headers['x-oss-callback']) { + ret.data = JSON.parse(result.data.toString()); + } + + return ret; +}; + +/** + * Upload a part in a multipart upload transaction + * @param {String} name the object name + * @param {String} uploadId the upload id + * @param {Integer} partNo the part number + * @param {Object} data the body data + * @param {Object} options + */ +proto._uploadPart = function* _uploadPart(name, uploadId, partNo, data, options) { + options = options || {}; + options.headers = { + 'Content-Length': data.size + }; + + options.subres = { + partNumber: partNo, + uploadId: uploadId + }; + var params = this._objectRequestParams('PUT', name, options); + params.mime = options.mime; + params.stream = data.stream; + params.successStatuses = [200]; + + var result = yield this.request(params); + + data.stream = null; + params.stream = null; + return { + name: name, + etag: result.res.headers.etag, + res: result.res + }; +}; \ No newline at end of file diff --git a/lib/multipart.js b/lib/managed_upload.js similarity index 61% rename from lib/multipart.js rename to lib/managed_upload.js index dc19b7205..075c6c8c5 100644 --- a/lib/multipart.js +++ b/lib/managed_upload.js @@ -3,8 +3,6 @@ var debug = require('debug')('ali-oss:multipart'); var fs = require('fs'); var is = require('is-type-of'); -var destroy = 
require('destroy'); -var eoe = require('end-or-error'); var util = require('util'); var path = require('path'); var mime = require('mime'); @@ -62,7 +60,7 @@ proto.multipartUpload = function* multipartUpload(name, file, options) { throw new Error('partSize must not be smaller than ' + minPartSize); } - var result = yield this._initMultipartUpload(name, options); + var result = yield this.initMultipartUpload(name, options); var uploadId = result.uploadId; var partSize = this._getPartSize(fileSize, options.partSize); @@ -106,7 +104,7 @@ proto._resumeMultipart = function* _resumeMultipart(checkpoint, options) { size: pi.end - pi.start }; - var result = yield self._uploadPart(name, uploadId, partNo, data); + var result = yield self._uploadPart(name, uploadId, partNo, data, options); doneParts.push({ number: partNo, etag: result.res.headers.etag @@ -147,168 +145,9 @@ proto._resumeMultipart = function* _resumeMultipart(checkpoint, options) { } } - return yield this._completeMultipartUpload(name, uploadId, doneParts, options); + return yield this.completeMultipartUpload(name, uploadId, doneParts, options); }; -/** - * List the on-going multipart uploads - * @param {Object} options - * @return {Array} the multipart uploads - */ -proto.listUploads = function* listUploads(query, options) { - options = options || {}; - options.subres = 'uploads'; - var params = this._objectRequestParams('GET', '', options) - params.query = query; - params.xmlResponse = true; - params.successStatuses = [200]; - - var result = yield this.request(params); - var uploads = result.data.Upload || []; - if (!Array.isArray(uploads)) { - uploads = [uploads]; - } - uploads = uploads.map(function (up) { - return { - name: up.Key, - uploadId: up.UploadId, - initiated: up.Initiated - }; - }); - - return { - res: result.res, - uploads: uploads, - bucket: result.data.Bucket, - nextKeyMarker: result.data.NextKeyMarker, - nextUploadIdMarker: result.data.NextUploadIdMarker, - isTruncated: 
result.data.IsTruncated === 'true' - }; -}; - -/** - * Abort a multipart upload transaction - * @param {String} name the object name - * @param {String} uploadId the upload id - * @param {Object} options - */ -proto.abortMultipartUpload = function* abortMultipartUpload(name, uploadId, options) { - options = options || {}; - options.subres = {uploadId: uploadId}; - var params = this._objectRequestParams('DELETE', name, options); - params.successStatuses = [204]; - - var result = yield this.request(params); - - return { - res: result.res - }; -}; - -/** - * Initiate a multipart upload transaction - * @param {String} name the object name - * @param {Object} options - * @return {String} upload id - */ -proto._initMultipartUpload = function* _initMultipartUpload(name, options) { - options = options || {}; - options.headers = options.headers || {}; - this._convertMetaToHeaders(options.meta, options.headers); - - options.subres = 'uploads'; - var params = this._objectRequestParams('POST', name, options); - params.mime = options.mime; - params.xmlResponse = true; - params.successStatuses = [200]; - - var result = yield this.request(params); - - return { - res: result.res, - bucket: result.data.Bucket, - name: result.data.Key, - uploadId: result.data.UploadId - }; -}; - -/** - * Upload a part in a multipart upload transaction - * @param {String} name the object name - * @param {String} uploadId the upload id - * @param {Integer} partNo the part number - * @param {Object} data the body data - * @param {Object} options - */ -proto._uploadPart = function* _uploadPart(name, uploadId, partNo, data, options) { - options = options || {}; - options.headers = { - 'Content-Length': data.size - }; - - options.subres = { - partNumber: partNo, - uploadId: uploadId - }; - var params = this._objectRequestParams('PUT', name, options); - params.mime = options.mime; - params.stream = data.stream; - params.successStatuses = [200]; - - var result = yield this.request(params); - - data.stream = 
null; - params.stream = null; - return { - name: name, - etag: result.res.headers.etag, - res: result.res - }; -}; - -/** - * Complete a multipart upload transaction - * @param {String} name the object name - * @param {String} uploadId the upload id - * @param {Array} parts the uploaded parts - * @param {Object} options - */ -proto._completeMultipartUpload = function* _completeMultipartUpload(name, uploadId, parts, options) { - parts.sort((a, b) => a.number - b.number); - var xml = '\n\n'; - for (var i = 0; i < parts.length; i++) { - var p = parts[i]; - xml += '\n'; - xml += '' + p.number + '\n'; - xml += '' + p.etag + '\n'; - xml += '\n'; - } - xml += ''; - - options = options || {}; - options.subres = {uploadId: uploadId}; - var params = this._objectRequestParams('POST', name, options); - params.mime = 'xml'; - params.content = xml; - if (!(options.headers && options.headers['x-oss-callback'])) { - params.xmlResponse = true; - } - params.successStatuses = [200]; - var result = yield this.request(params); - - var ret = { - res: result.res, - bucket: params.bucket, - name: name, - etag: result.res.headers['etag'] - }; - - if (options.headers && options.headers['x-oss-callback']) { - ret.data = JSON.parse(result.data.toString()); - } - - return ret; -}; is.file = function (file) { return typeof(File) !== 'undefined' && file instanceof File; diff --git a/test/browser.tests.js b/test/browser.tests.js index 29f521f0f..734f979cd 100644 --- a/test/browser.tests.js +++ b/test/browser.tests.js @@ -575,7 +575,7 @@ describe('browser', function () { // var name = '/' var ids = []; for (var i = 0; i < 5; i++) { - var result = yield this.store._initMultipartUpload(name + i); + var result = yield this.store.initMultipartUpload(name + i); ids.push(result.uploadId); } // list all uploads @@ -612,7 +612,7 @@ describe('browser', function () { var name = prefix + 'multipart/list-id'; var ids = []; for (var i = 0; i < 5; i++) { - var result = yield 
this.store._initMultipartUpload(name); + var result = yield this.store.initMultipartUpload(name); ids.push(result.uploadId); } ids.sort(); @@ -649,7 +649,7 @@ describe('browser', function () { var foo_name = prefix + 'multipart/list-foo'; var foo_ids = []; for (var i = 0; i < 5; i++) { - var result = yield this.store._initMultipartUpload(foo_name); + var result = yield this.store.initMultipartUpload(foo_name); foo_ids.push(result.uploadId); } foo_ids.sort(); @@ -657,7 +657,7 @@ describe('browser', function () { var bar_name = prefix + 'multipart/list-bar'; var bar_ids = []; for (var i = 0; i < 5; i++) { - var result = yield this.store._initMultipartUpload(bar_name); + var result = yield this.store.initMultipartUpload(bar_name); bar_ids.push(result.uploadId); } bar_ids.sort(); @@ -689,6 +689,19 @@ describe('browser', function () { }); describe('multipartUpload()', function () { + + it.skip('should initMultipartUpload with x-oss-server-side-encryption', function* () { + //wait server bucket cors on line + var name = 'multipart-x-oss-server-side-encryption'; + var result = yield this.store.initMultipartUpload(name, { + headers: { + 'x-oss-server-side-encryption': 'AES256' + } + }); + + assert.equal(result.res.headers['x-oss-server-side-encryption'], 'AES256'); + }); + it('should fallback to putStream when file size is smaller than 100KB', function* () { var file = new File(['multipart-fallback-test'], 'multipart-fallback'); var name = prefix + 'multipart/fallback'; @@ -767,7 +780,7 @@ describe('browser', function () { assert.deepEqual(md5(object.content), md5(fileBuf)); }); - it('return requestId in init, upload part, complete', function* () { + it('should return requestId in init, upload part, complete', function* () { var fileContent = Array(1024 * 1024).fill('a').join('') var file = new File([fileContent], 'multipart-fallback'); var name = prefix + 'multipart/fallback'; @@ -815,7 +828,7 @@ describe('browser', function () { }); //multipart cancel test - it('upload 
file with cancel', function* () { + it('should upload file with cancel', function* () { var client = this.store; // create a file with 1M random data var fileContent = Array(1 * 1024 * 1024).fill('a').join(''); @@ -860,7 +873,7 @@ describe('browser', function () { }); - it('multipart upload file with abort', function* () { + it('should multipart upload file with abort', function* () { var client = this.store; // create a file with 1M random data var fileContent = Array(1 * 1024 * 1024).fill('a').join(''); @@ -893,6 +906,66 @@ describe('browser', function () { assert.equal(true, client.isCancel()); } }); + + it('should upload with uploadPart', function* () { + var fileContent = Array(10 * 100 * 1024).fill('a').join(''); + var file = new File([fileContent], 'multipart-upload-part'); + + var name = prefix + 'multipart/upload-part-file.js'; + var init = yield this.store.initMultipartUpload(name); + var uploadId = init.uploadId; + var partSize = 100 * 1024; + var dones = []; + for (var i = 1; i <= 10; i++) { + var start = (i-1) * partSize; + var end = Math.min(i * partSize, file.size); + var part = yield this.store.uploadPart(name, uploadId, i, file, start, end); + dones.push({ + number: i, + etag: part.res.headers.etag + }); + } + + var result = yield this.store.completeMultipartUpload(name, uploadId, dones); + assert.equal(result.res.status, 200); + }); + + it('should upload with list part', function* () { + var client = this.store; + // create a file with 1M random data + var fileContent = Array(1 * 1024 * 1024).fill('a').join(''); + var file = new File([fileContent], 'multipart-upload-list-part'); + + var name = prefix + 'multipart/upload-list-part'; + + var uploadId = null; + var options = { + progress: function (p, checkpoint) { + return function (done) { + if (p === 0) { + uploadId = checkpoint.uploadId; + } + if (p > 0.5) { + client.cancel(); + } + done(); + }; + }, + partSize: 100 * 1024 + } + try { + yield client.multipartUpload(name, file, options); + } catch 
(err) { + } + + var result = yield this.store.listParts(name, uploadId, { + 'max-parts': 1000 + }, {}); + + assert.equal(result.res.status, 200); + + }); + }); }); @@ -927,7 +1000,7 @@ describe('browser', function () { }); }); - describe('request err', function() { + describe('requestErr()', function() { before(function* () { var ossConfig = { region: stsConfig.region, @@ -939,7 +1012,7 @@ describe('browser', function () { }; this.store = oss(ossConfig); }); - it('request timeout exception', function* () { + it('should request timeout exception', function* () { var fileContent = Array(1024*1024).fill('a').join('') var file = new File([fileContent], 'multipart-upload-file'); @@ -955,7 +1028,7 @@ describe('browser', function () { assert.equal(timeout_err.status, -2); }); - it('request net exception', function* () { + it('should request net exception', function* () { var fileContent = Array(1024*1024).fill('a').join('') var file = new File([fileContent], 'multipart-upload-file'); diff --git a/test/multipart.test.js b/test/multipart.test.js index fe055518d..637226e82 100644 --- a/test/multipart.test.js +++ b/test/multipart.test.js @@ -46,7 +46,7 @@ describe('test/multipart.test.js', function () { var name = prefix + 'multipart/list-key'; var ids = []; for (var i = 0; i < 5; i ++) { - var result = yield this.store._initMultipartUpload(name + i); + var result = yield this.store.initMultipartUpload(name + i); ids.push(result.uploadId); } // list all uploads @@ -83,7 +83,7 @@ describe('test/multipart.test.js', function () { var name = prefix + 'multipart/list-id'; var ids = []; for (var i = 0; i < 5; i ++) { - var result = yield this.store._initMultipartUpload(name); + var result = yield this.store.initMultipartUpload(name); ids.push(result.uploadId); } ids.sort(); @@ -122,7 +122,7 @@ describe('test/multipart.test.js', function () { var foo_name = prefix + 'multipart/list-foo'; var foo_ids = []; for (var i = 0; i < 5; i ++) { - var result = yield 
this.store._initMultipartUpload(foo_name); + var result = yield this.store.initMultipartUpload(foo_name); foo_ids.push(result.uploadId); } foo_ids.sort(); @@ -130,7 +130,7 @@ describe('test/multipart.test.js', function () { var bar_name = prefix + 'multipart/list-bar'; var bar_ids = []; for (var i = 0; i < 5; i ++) { - var result = yield this.store._initMultipartUpload(bar_name); + var result = yield this.store.initMultipartUpload(bar_name); bar_ids.push(result.uploadId); } bar_ids.sort(); @@ -164,14 +164,25 @@ describe('test/multipart.test.js', function () { describe('multipartUpload()', function () { afterEach(mm.restore); + it('should initMultipartUpload with x-oss-server-side-encryption', function* () { + var name = 'multipart-x-oss-server-side-encryption'; + var result = yield this.store.initMultipartUpload(name, { + headers: { + 'x-oss-server-side-encryption': 'AES256' + } + }); + + assert.equal(result.res.headers['x-oss-server-side-encryption'], 'AES256'); + }); + it('should fallback to putStream when file size is smaller than 100KB', function* () { var fileName = yield utils.createTempFile('multipart-fallback', 100 * 1024 - 1); var name = prefix + 'multipart/fallback'; var progress = 0; - + var putStreamSpy = sinon.spy(this.store, 'putStream'); var uploadPartSpy = sinon.spy(this.store, '_uploadPart'); - + var result = yield this.store.multipartUpload(name, fileName, { progress: function () { progress++; @@ -334,12 +345,12 @@ describe('test/multipart.test.js', function () { }); it('should resume upload using checkpoint', function* () { - var _uploadPart = this.store._uploadPart; + var uploadPart = this.store._uploadPart; mm(this.store, '_uploadPart', function* (name, uploadId, partNo, data) { if (partNo == 5) { throw new Error('mock upload part fail.'); } else { - return _uploadPart.call(this, name, uploadId, partNo, data); + return uploadPart.call(this, name, uploadId, partNo, data); } }); @@ -402,7 +413,7 @@ describe('test/multipart.test.js', function () { 
assert.equal(result.data.Status, 'OK'); }); - it('return requestId in init, upload part, complete', function* () { + it('should return requestId in init, upload part, complete', function* () { var fileName = yield utils.createTempFile('multipart-upload-file', 1024 * 1024);// 1m var name = prefix + 'multipart/upload-file'; @@ -417,11 +428,71 @@ describe('test/multipart.test.js', function () { }); + it('should upload with uploadPart', function* () { + var fileName = yield utils.createTempFile('upload-with-upload-part', 10 * 100 * 1024); + + var name = prefix + 'multipart/upload-with-upload-part'; + + var init = yield this.store.initMultipartUpload(name); + var uploadId = init.uploadId; + var partSize = 100 * 1024; + var dones = []; + for (var i = 1; i <= 10; i++) { + var start = (i-1) * partSize; + var end = Math.min(i * partSize, 10 * 100 * 1024); + var part = yield this.store.uploadPart(name, uploadId, i, fileName, start, end); + dones.push({ + number: i, + etag: part.etag + }); + } + + var result = yield this.store.completeMultipartUpload(name, uploadId, dones); + assert.equal(result.res.status, 200); + }); + + it('should upload with list part', function* () { + var fileName = yield utils.createTempFile('multipart-upload-list-part', 2 * 1024 * 1024); + var name = prefix + 'multipart/upload-list-part'; + yield this.store.multipartUpload(name, fileName); + var client = this.store; + var copyName = prefix + 'multipart/upload-list-part-copy'; + var uploadId = null; + try { + yield client.multipartUploadCopy(copyName, { + sourceKey: name, + sourceBucketName: this.bucket + }, { + parallel: 1, + partSize: 100 *1024, + progress: function (p, checkpoint) { + return function (done) { + if (p === 0) { + uploadId = checkpoint.uploadId; + } + if (p > 0.5) { + client.cancel(); + } + done(); + }; + } + }); + } catch (err) { + } + + var result = yield this.store.listParts(copyName, uploadId, { + 'max-parts': 1000 + }, {}); + + assert.equal(result.res.status, 200); + + }); + }); - 
describe('request error', function() { + describe('requestError()', function() { - it('request timeout exception', function* () { + it('should request timeout exception', function* () { var fileName = yield utils.createTempFile('multipart-upload-file', 1024 * 1024);// 1m var name = prefix + 'multipart/upload-file'; @@ -443,7 +514,7 @@ describe('test/multipart.test.js', function () { this.store.urllib.request.restore(); }); - it('request net exception', function* () { + it('should request net exception', function* () { var fileName = yield utils.createTempFile('multipart-upload-file', 1024 * 1024);// 1m var name = prefix + 'multipart/upload-file'; @@ -468,4 +539,212 @@ describe('test/multipart.test.js', function () { }); }); + describe('multipartCopy()', function () { + + var fileName; + var name; + before(function* () { + fileName = yield utils.createTempFile('multipart-upload-file-copy', 2 * 1024 * 1024); + name = prefix + 'multipart/upload-file-with-copy'; + yield this.store.multipartUpload(name, fileName); + }); + + it('should multipart copy copy size err', function* () { + var file = yield utils.createTempFile('multipart-upload-file', 50 * 1024); + var objectKey = prefix + 'multipart/upload-file-with-copy-small'; + yield this.store.multipartUpload(objectKey, file); + var client = this.store; + var copyName = prefix + 'multipart/upload-file-with-copy-small-new'; + var copyErr = null; + try { + yield client.multipartUploadCopy(copyName, { + sourceKey: objectKey, + sourceBucketName: this.bucket + }); + } catch (err) { + copyErr = err; + } + + assert.equal(copyErr.message, 'copySize must not be smaller than 102400'); + }); + + it('should multipart copy part size err', function* () { + + var client = this.store; + var copyName = prefix + 'multipart/upload-file-with-copy-new'; + var partSizeErr = null; + try { + yield client.multipartUploadCopy(copyName, { + sourceKey: name, + sourceBucketName: this.bucket + }, { + partSize: 50 * 1024 + }); + } catch (err) { + 
partSizeErr = err; + } + + assert.equal(partSizeErr.message, 'partSize must not be smaller than 102400'); + }); + + it('should copy with upload part copy', function* () { + var client = this.store; + + // create a file with 1M random data + var fileName = yield utils.createTempFile('multipart-upload-file-temp-copy', 10 * 100 * 1024); + + var name = prefix + 'multipart/upload-file-temp-copy'; + yield client.multipartUpload(name, fileName); + + var copyName = prefix + 'multipart/upload-file-with-copy-new'; + var sourceData = { + sourceKey: name, + sourceBucketName: this.bucket + } + var objectMeta = yield client._getObjectMeta(sourceData.sourceBucketName, sourceData.sourceKey, {}); + var fileSize = objectMeta.res.headers['content-length']; + + var result = yield client.initMultipartUpload(copyName); + + var partSize = 100 * 1024;//100kb + var dones = []; + //if file part is 10 + for (var i = 1; i <= 10; i++) { + var start = partSize * (i -1); + var end = Math.min(start + partSize, fileSize); + var range = start + '-' + (end - 1); + var part = yield client.uploadPartCopy(copyName, result.uploadId, i, range, sourceData, {}); + dones.push({ + number: i, + etag: part.res.headers.etag + }); + } + + var complete = yield client.completeMultipartUpload(copyName, result.uploadId, dones); + + assert.equal(complete.res.status, 200); + + }); + + + + it('should copy with multipart upload copy', function* () { + var client = this.store; + var copyName = prefix + 'multipart/upload-file-with-copy-new'; + var result = yield client.multipartUploadCopy(copyName, { + sourceKey: name, + sourceBucketName: this.bucket + }, { + partSize: 256 *1024 + }); + + assert.equal(result.res.status, 200); + }); + + it('should multipart upload copy in IE10', function* () { + + var copyName = prefix + 'multipart/upload-copy-in-ie10'; + var clientTmp = oss(config); + clientTmp.useBucket(this.bucket, this.region); + var checkBrowserAndVersion = sinon.stub(clientTmp, 'checkBrowserAndVersion', 
function(browser, version) { + return (browser === "Internet Explorer" && version === "10"); + }); + var result = yield clientTmp.multipartUploadCopy(copyName, { + sourceKey: name, + sourceBucketName: this.bucket + }, { + partSize: 100 * 1024, + }); + assert.equal(result.res.status, 200); + checkBrowserAndVersion.restore(); + }); + + it('should multipart upload copy with parallel = 1', function* () { + + var client = this.store; + var copyName = prefix + 'multipart/upload-file-with-copy-parallel-1'; + var result = yield client.multipartUploadCopy(copyName, { + sourceKey: name, + sourceBucketName: this.bucket + }, { + partSize: 256 *1024, + parallel: 1 + }); + + assert.equal(result.res.status, 200); + }); + + it('should multipart copy with cancel and resume', function* () { + var client = this.store; + var copyName = prefix + 'multipart/upload-file-with-copy-cancel'; + var tempCheckpoint = null; + try { + yield client.multipartUploadCopy(copyName, { + sourceKey: name, + sourceBucketName: this.bucket + }, { + partSize: 100 *1024, + progress: function (p, checkpoint) { + return function (done) { + tempCheckpoint = checkpoint; + if (p > 0.5) { + client.cancel(); + } + done(); + }; + } + }); + } catch (err) { + assert.equal(client.isCancel(), true); + } + + var result = yield client.multipartUploadCopy(copyName, { + sourceKey: name, + sourceBucketName: this.bucket + }, { + partSize: 100 * 1024, + checkpoint: tempCheckpoint, + progress: function (p) { + return function (done) { + assert.equal(p > 0.5, true); + done(); + }; + } + }); + + assert.equal(result.res.status, 200); + + }); + + it('should multipart copy with exception', function* () { + + var copyName = prefix + 'multipart/upload-file-with-copy-exception'; + var clientTmp = oss(config); + clientTmp.useBucket(this.bucket, this.region); + + var stubUploadPart = sinon.stub(clientTmp, 'uploadPartCopy', function* (name, uploadId, partNo, range, sourceData, options){ + if (partNo === 1) { + throw new 
Error('TestErrorException'); + } + }); + + var error_msg = ""; + var partNum; + try { + yield clientTmp.multipartUploadCopy(copyName, { + sourceKey: name, + sourceBucketName: this.bucket + }); + } catch (err) { + error_msg = err.message; + partNum = err.partNum; + } + assert.equal(error_msg, + "Failed to copy some parts with error: Error: TestErrorException part_num: 1"); + assert.equal(partNum, 1); + stubUploadPart.restore(); + }); + + }); + });