@@ -8,6 +8,7 @@ const path = require('path');
 require('winston-daily-rotate-file');
 const ProgressBar = require('progress');
 const BlueBirdPromise = require("bluebird");
+const glob = require('glob');
 
 const logger = require('../lib/log');
 const { DEFAULT_CHUNK_SIZE, MAX_CHUNK } = require('../lib/constants');
@@ -33,7 +34,7 @@ process.on('uncaughtException', error => {
   logger.error(error.stack);
 })
 
-const upload = async (filePath, parts = []) => {
+const upload = async (filePath, parts = [], requestUrl) => {
   const bar = new ProgressBar(':bar [:current/:total] :percent ', { total: totalChunk });
   const uploadChunk = async (currentChunk, currentChunkIndex, parts, isRetry) => {
     if (parts.some(({ partNumber, size }) => partNumber === currentChunkIndex && size === currentChunk.length)) {
@@ -47,7 +48,7 @@ const upload = async (filePath, parts = []) => {
       version,
       partNumber: currentChunkIndex,
       size: currentChunk.length,
-      currentChunk
+      currentChunk
     }, {
       headers: {
         'Content-Type': 'application/octet-stream'
@@ -75,14 +76,14 @@ const upload = async (filePath, parts = []) => {
     }
   }
 
-  console.log(`\n开始上传\n`)
-  logger.info(' 开始上传')
+  console.log(`\n开始上传(${filePath})\n`);
+  logger.info(` 开始上传(${filePath})`);
 
   try {
 
-    const chunkIndexs = new Array(totalChunk).fill("").map((_, index) => index + 1)
+    const chunkIndexs = new Array(totalChunk).fill("").map((_, index) => index + 1)
 
-    await BlueBirdPromise.map(chunkIndexs, (currentChunkIndex) => {
+    await BlueBirdPromise.map(chunkIndexs, (currentChunkIndex) => {
       const start = (currentChunkIndex - 1) * chunkSize;
       const end = ((start + chunkSize) >= fileSize) ? fileSize : start + chunkSize - 1;
       const stream = fs.createReadStream(filePath, { start, end })
@@ -113,9 +114,9 @@ const upload = async (filePath, parts = []) => {
 
 
 
-
 
-  const merge = async () => {
+
+  const merge = async () => {
     console.log(chalk.cyan('正在合并分片,请稍等...'))
     return await _mergeAllChunks(requestUrl, {
       version,
@@ -126,7 +127,7 @@ const upload = async (filePath, parts = []) => {
       Authorization
     });
   }
-
+
 
   try {
     const res = await withRetry(merge, 3, 500);
@@ -140,11 +141,11 @@ const upload = async (filePath, parts = []) => {
     return;
   }
 
-  console.log(chalk.green(`\n上传完毕\n`))
+  console.log(chalk.green(`\n上传完毕(${filePath})\n`))
   logger.info('************************ 上传完毕 ************************')
 }
 
-const getFileMD5Success = async (filePath) => {
+const getFileMD5Success = async (filePath, requestUrl) => {
   try {
     const res = await _getExistChunks(requestUrl, {
       fileSize,
@@ -160,10 +161,10 @@ const getFileMD5Success = async (filePath) => {
 
     // 上传过一部分
     if (Array.isArray(res.data.parts)) {
-      await upload(filePath, res.data.parts);
+      await upload(filePath, res.data.parts, requestUrl);
     } else {
       // 未上传过
-      await upload(filePath);
+      await upload(filePath, [], requestUrl);
     }
   } catch (error) {
     logger.error(error.message);
@@ -173,20 +174,20 @@ const getFileMD5Success = async (filePath) => {
   }
 }
 
-const getFileMD5 = async (filePath) => {
+const getFileMD5 = async (filePath, requestUrl) => {
   totalChunk = Math.ceil(fileSize / DEFAULT_CHUNK_SIZE);
   if (totalChunk > MAX_CHUNK) {
     chunkSize = Math.ceil(fileSize / MAX_CHUNK);
     totalChunk = Math.ceil(fileSize / chunkSize);
   }
   const spark = new SparkMD5.ArrayBuffer();
   try {
-    console.log(`\n开始计算 MD5\n`)
-    logger.info(' 开始计算 MD5')
+    console.log(`\n开始计算 MD5(${filePath})\n`);
+    logger.info(` 开始计算 MD5(${filePath})`);
 
     const bar = new ProgressBar(':bar [:current/:total] :percent ', { total: totalChunk });
     await new Promise(resolve => {
-      stream = fs.createReadStream(filePath, { highWaterMark: chunkSize })
+      stream = fs.createReadStream(filePath, { highWaterMark: chunkSize });
       stream.on('data', chunk => {
         bar.tick();
         spark.append(chunk)
@@ -198,7 +199,7 @@ const getFileMD5 = async (filePath) => {
         md5 = spark.end();
         spark.destroy();
         console.log(`\n文件 MD5:${md5}\n`)
-        await getFileMD5Success(filePath);
+        await getFileMD5Success(filePath, requestUrl);
         resolve();
       })
     }).catch(error => {
@@ -212,14 +213,65 @@ const getFileMD5 = async (filePath) => {
   }
 }
 
+const uploadFile = async (filePath, size, requestUrl) => {
+  fileSize = size;
+  await getFileMD5(filePath, requestUrl);
+  md5 = '';
+  uploadId = '';
+  fileSize = 0;
+  chunkSize = DEFAULT_CHUNK_SIZE;
+  totalChunk = 0;
+}
+
+const uploadDir = async (dir) => {
+  let files = [];
+  try {
+    files = await new Promise((resolve, reject) => {
+      glob("**/**", {
+        cwd: dir,
+        root: dir
+      }, function (error, files = []) {
+        if (error) {
+          reject(error);
+        } else {
+          resolve(files)
+        }
+      })
+    });
+  } catch (error) {
+    console.log(chalk.red((error.response && error.response.data) || error.message));
+    logger.error(error.message);
+    logger.error(error.stack);
+    process.exit(1);
+  }
+
+  for (const file of files) {
+    const filePath = path.join(dir, file);
+    const stat = fs.lstatSync(filePath);
+    const isDirectory = stat.isDirectory();
+    if (!isDirectory) {
+      const url = new URL(`chunks/${dir.split(path.sep).pop()}/${file}`, requestUrl.endsWith('/') ? requestUrl : `${requestUrl}/`).toString();
+      await uploadFile(filePath, stat.size, url);
+      console.log('************************ **** ************************');
+      logger.info('************************ **** ************************');
+    }
+  }
+}
+
 const beforeUpload = async (filePath) => {
+  const isUploadDir = argv.dir;
+  let fSize = 0;
   try {
     const stat = fs.lstatSync(filePath);
-    if (stat.isDirectory()) {
+    const isDirectory = stat.isDirectory();
+    if (isDirectory && !isUploadDir) {
       console.log(chalk.red(`\n${filePath} 不合法,需指定一个文件\n`))
       process.exit(1);
+    } else if (!isDirectory && isUploadDir) {
+      console.log(chalk.red(`\n${filePath} 不合法,需指定一个文件夹\n`))
+      process.exit(1);
     }
-    fileSize = stat.size;
+    fSize = stat.size;
   } catch (error) {
     if (error.code === 'ENOENT') {
       console.log(chalk.red(`未找到 ${filePath}`));
@@ -230,7 +282,11 @@ const beforeUpload = async (filePath) => {
     }
     process.exit(1);
   }
-  await getFileMD5(filePath);
+  if (isUploadDir) {
+    await uploadDir(filePath);
+  } else {
+    await uploadFile(filePath, fSize, requestUrl);
+  }
 }
 
 const onUpload = (_username, _password) => {
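For reference, a minimal standalone sketch of the per-file URL construction that the new `uploadDir` performs; the base URL and paths below are hypothetical placeholders, and a POSIX-style path separator is assumed:

```js
// Sketch of uploadDir's URL construction (all values hypothetical).
const path = require('path');

const requestUrl = 'https://example.com/api';  // assumed upload endpoint base
const dir = '/data/my-site';                   // assumed directory passed via --dir
const file = 'assets/js/app.js';               // one relative path returned by glob()

// A trailing slash makes new URL() resolve the relative path against the full
// base path instead of replacing its last segment ('api' here).
const base = requestUrl.endsWith('/') ? requestUrl : `${requestUrl}/`;
const url = new URL(`chunks/${dir.split(path.sep).pop()}/${file}`, base).toString();

console.log(url);
// => https://example.com/api/chunks/my-site/assets/js/app.js
```

The hand-rolled Promise wrapper around glob's callback could equally be written with `util.promisify(glob)`, since glob v7 follows Node's standard error-first callback convention.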