From 9434073404aef173e855c634d8f36a3514cc0564 Mon Sep 17 00:00:00 2001
From: bounty1342
Date: Fri, 11 Sep 2020 22:32:26 +0200
Subject: [PATCH 01/25] update rclone version

---
 go.mod | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/go.mod b/go.mod
index d326540..0882ed9 100644
--- a/go.mod
+++ b/go.mod
@@ -2,7 +2,7 @@ module github.com/donwa/gclone
 
 require (
 	github.com/pkg/errors v0.8.1
-	github.com/rclone/rclone v1.51.0
+	github.com/rclone/rclone v1.53.0
 	github.com/spf13/cobra v0.0.5
 	github.com/stretchr/testify v1.4.0
 	golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45

From 197034838338ddcdb171833daaa19ed99e27dcea Mon Sep 17 00:00:00 2001
From: bounty1342
Date: Fri, 11 Sep 2020 22:33:27 +0200
Subject: [PATCH 02/25] update version

---
 gclone.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gclone.go b/gclone.go
index bec0824..ebbaff7 100644
--- a/gclone.go
+++ b/gclone.go
@@ -10,6 +10,6 @@ import (
 )
 
 func main() {
-	fs.Version = fs.Version+"-mod1.3.1"
+	fs.Version = fs.Version+"-mod-b1342-0.1"
 	cmd.Main()
 }

From 5fecade62e0d523f8d1b4f5a187d5220b1c050fb Mon Sep 17 00:00:00 2001
From: bounty1342
Date: Fri, 11 Sep 2020 22:36:49 +0200
Subject: [PATCH 03/25] setup

---
 .github/workflows/release.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 979f518..19384ff 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -5,6 +5,7 @@ on:
     tags:
       - '*'
 
+
 jobs:
   goreleaser:
     runs-on: ubuntu-latest

From 8f8ae7a2df14a353f3c660f034ee178cb2f3aea1 Mon Sep 17 00:00:00 2001
From: bounty1342
Date: Fri, 11 Sep 2020 22:41:42 +0200
Subject: [PATCH 04/25] on push on master

---
 .github/workflows/release.yml | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 19384ff..bbb6502 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -2,9 +2,8 @@ name: goreleaser
 
 on:
   push:
-    tags:
-      - '*'
-
+    branches:
+      - master
 
 jobs:
   goreleaser:
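[Series note, not part of any patch] Patch 05 below adds rotation across a
pool of service account (SA) credential files: when Drive returns
rateLimitExceeded or userRateLimitExceeded, gclone swaps in the next JSON
file from service_account_file_path instead of failing. The new
service_account_start and service_account_end options restrict which slice
of that pool may be used, with -1 meaning "no bound on that side". A minimal
standalone sketch of the intended windowing rule — function and variable
names here are illustrative only, not part of the patch:

    package main

    import "fmt"

    // pickServiceAccount returns the first SA file whose position falls
    // inside the half-open window [start, end); -1 leaves a bound open.
    func pickServiceAccount(saFiles []string, start, end int) (string, bool) {
    	if start < 0 {
    		start = 0
    	}
    	if end < 0 || end > len(saFiles) {
    		end = len(saFiles)
    	}
    	if start >= end {
    		return "", false // empty or out-of-range window
    	}
    	return saFiles[start], true
    }

    func main() {
    	pool := []string{"sa01.json", "sa02.json", "sa03.json"}
    	if sa, ok := pickServiceAccount(pool, 1, -1); ok {
    		fmt.Println("next service account:", sa) // prints sa02.json
    	}
    }

In the real code each chosen file is also removed from the pool, so repeated
rate-limit errors walk through the window one credentials file at a time.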
From 0c296e3c6289476b8688ca451d6e8d513fd81029 Mon Sep 17 00:00:00 2001
From: bounty1342
Date: Fri, 11 Sep 2020 23:37:40 +0200
Subject: [PATCH 05/25] Update drive.go

---
 backend/drive/drive.go | 62 +++++++++++++++++++++++++++++++----------
 1 file changed, 42 insertions(+), 20 deletions(-)

diff --git a/backend/drive/drive.go b/backend/drive/drive.go
index 754e5f9..6caa79a 100644
--- a/backend/drive/drive.go
+++ b/backend/drive/drive.go
@@ -233,6 +233,16 @@ in with the ID of the root folder.
 			Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
 			Hide: fs.OptionHideConfigurator,
 			Advanced: true,
+		}, {
+			Name:     "service_account_start",
+			Default:  -1,
+			Help:     "Start index of the service account files in service_account_file_path to use. -1 means start at the beginning.",
+			Advanced: true,
+		}, {
+			Name:     "service_account_end",
+			Default:  -1,
+			Help:     "End index of the service account files in service_account_file_path to use. -1 means use to the end.",
+			Advanced: true,
 		}, {
 			Name: "team_drive",
 			Help: "ID of the Team Drive",
@@ -328,7 +338,6 @@ Photos folder" option in your google drive settings. You can
 then copy or move the photos locally and use the date the image
 was taken (created) set as the modification date.`,
 			Advanced: true,
-			Hide:     fs.OptionHideConfigurator,
 		}, {
 			Name:    "use_shared_date",
 			Default: false,
@@ -340,7 +349,6 @@ unexpected consequences when uploading/downloading files.
 If both this flag and "--drive-use-created-date" are set, the created
 date is used.`,
 			Advanced: true,
-			Hide:     fs.OptionHideConfigurator,
 		}, {
 			Name:     "list_chunk",
 			Default:  1000,
@@ -414,7 +422,6 @@ doing rclone ls/lsl/lsf/lsjson/etc only.
 If you do use this flag for syncing (not recommended) then you will
 need to use --ignore size also.`,
 			Advanced: true,
-			Hide:     fs.OptionHideConfigurator,
 		}, {
 			Name:    "v2_download_min_size",
 			Default: fs.SizeSuffix(-1),
@@ -500,9 +507,10 @@ type Options struct {
 	Scope                     string               `config:"scope"`
 	RootFolderID              string               `config:"root_folder_id"`
 	ServiceAccountFile        string               `config:"service_account_file"`
-	// 添加一个变量
 	ServiceAccountFilePath    string               `config:"service_account_file_path"`
 	ServiceAccountCredentials string               `config:"service_account_credentials"`
+	ServiceAccountFileStart   int                  `config:"service_account_start"`
+	ServiceAccountFileEnd     int                  `config:"service_account_end"`
 	TeamDriveID               string               `config:"team_drive"`
 	AuthOwnerOnly             bool                 `config:"auth_owner_only"`
 	UseTrash                  bool                 `config:"use_trash"`
@@ -623,7 +631,7 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
 		if len(gerr.Errors) > 0 {
 			reason := gerr.Errors[0].Reason
 			if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
-				// 如果存在 ServiceAccountFilePath,调用 changeSvc, 重试
+				// If ServiceAccountFilePath exists, call changeSvc and try again
 				if(f.opt.ServiceAccountFilePath != ""){
 					f.waitChangeSvc.Lock()
 					f.changeSvc()
@@ -641,11 +649,11 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
 	return false, err
 }
 
-// 替换 f.svc 函数
+// Replace the f.svc Drive client with one using the next service account
 func (f *Fs) changeSvc(){
 	opt := &f.opt;
 	/**
-	 * 获取sa文件列表
+	 * Get the list of sa files
 	 */
 	if(opt.ServiceAccountFilePath != "" && len(f.ServiceAccountFiles) == 0){
 		f.ServiceAccountFiles = make(map[string]int)
@@ -661,31 +669,49 @@ func (f *Fs) changeSvc(){
 			}
 		}
 	}
-	// 如果读取文件夹后还是0 , 退出
+
+	// If the pool is still empty after reading the folder, give up
 	if(len(f.ServiceAccountFiles) <= 0){
 		return ;
 	}
+
+	startSA := opt.ServiceAccountFileStart
+	// Default to the start of the pool when unset
+	if startSA == -1 {
+		startSA = 0
+	}
+	endSA := opt.ServiceAccountFileEnd
+	// Default to the end of the pool when unset
+	if endSA == -1 {
+		endSA = len(f.ServiceAccountFiles)
+	}
 	/**
-	 * 从sa文件列表 随机取一个,并删除列表中的元素
+	 * Take the first remaining SA that falls inside the [startSA, endSA) window
 	 */
-	r := rand.Intn(len(f.ServiceAccountFiles))
+	r := 0
 	for k := range f.ServiceAccountFiles {
-		if r == 0 {
+		if r >= endSA {
+			break
+		}
+		if r >= startSA {
 			opt.ServiceAccountFile = k
+			break
 		}
-		r--
+		r++
 	}
-	// 从库存中删除
+
+	// Remove the chosen file from the pool
 	delete(f.ServiceAccountFiles, opt.ServiceAccountFile)
 	/**
-	 * 创建 client 和 svc
+	 * Create client and svc
 	 */
 	loadedCreds, _ := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
 	opt.ServiceAccountCredentials = string(loadedCreds)
 	oAuthClient, err := getServiceAccountClient(opt, []byte(opt.ServiceAccountCredentials))
 	f.client = oAuthClient
 	f.svc, err = drive.New(f.client)
 	fmt.Println("gclone sa file:", opt.ServiceAccountFile)
+	fmt.Println("gclone sa number:", r)
 }
 
 // parseParse parses a drive 'url'
@@ -760,7 +786,6 @@ func (f *Fs) getRootID() (string, error) {
 //
 // Search params: https://developers.google.com/drive/search-parameters
 func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directoriesOnly, filesOnly, includeAll bool, fn listFn) (found bool, err error) {
-	//f.changeSvc()
 	var query []string
 	if !includeAll {
 		q := "trashed=" + strconv.FormatBool(f.opt.TrashedOnly)
@@ -836,7 +861,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
 		fields += ",quotaBytesUsed"
 	}
 
-	fields = fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", fields)
+	fields = fmt.Sprintf("files(%s),nextPageToken", fields)
 
 OUTER:
 	for {
@@ -850,9 +875,6 @@ OUTER:
 		if err != nil {
 			return false, errors.Wrap(err, "couldn't list directory")
 		}
-		if files.IncompleteSearch {
-			fs.Errorf(f, "search result INCOMPLETE")
-		}
 		for _, item := range files.Files {
 			item.Name = f.opt.Enc.ToStandardName(item.Name)
 			// Check the case of items is correct since
@@ -1102,7 +1124,7 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
 	err := configstruct.Set(m, opt)
 	//-----------------------------------------------------------
 	maybeIsFile := false
-	// 添加 {id} 作为根目录功能
+	// Support {id} at the start of the path as the root folder ID
 	if(path != "" && path[0:1] == "{"){
 		idIndex := strings.Index(path,"}")
 		if(idIndex > 0){
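[Series note, not part of any patch] The retry path wired up in patch 05 can
be read as the following standalone sketch: on a rate-limit error the pool
is locked, the next SA file is popped, the Drive client is rebuilt, and the
operation is retried. All types and names below are illustrative only, not
the fork's actual API:

    package main

    import (
    	"errors"
    	"fmt"
    	"sync"
    )

    var errRateLimited = errors.New("userRateLimitExceeded")

    // saPool is a mutex-guarded queue of service account files, mirroring
    // the map plus waitChangeSvc lock used in the patch.
    type saPool struct {
    	mu    sync.Mutex
    	files []string
    }

    // next pops the next SA file, reporting false when the pool is empty.
    func (p *saPool) next() (string, bool) {
    	p.mu.Lock()
    	defer p.mu.Unlock()
    	if len(p.files) == 0 {
    		return "", false
    	}
    	sa := p.files[0]
    	p.files = p.files[1:]
    	return sa, true
    }

    // doWithRotation retries op with fresh credentials on rate-limit errors.
    func doWithRotation(pool *saPool, op func(sa string) error) error {
    	for sa, ok := pool.next(); ok; sa, ok = pool.next() {
    		if err := op(sa); !errors.Is(err, errRateLimited) {
    			return err // success, or an error rotation can't help with
    		}
    		fmt.Println("rate limited, rotating service account")
    	}
    	return errors.New("service account pool exhausted")
    }

    func main() {
    	pool := &saPool{files: []string{"sa01.json", "sa02.json"}}
    	attempts := 0
    	err := doWithRotation(pool, func(sa string) error {
    		attempts++
    		fmt.Println("attempt", attempts, "with", sa)
    		if attempts == 1 {
    			return errRateLimited // first SA is over its daily quota
    		}
    		return nil
    	})
    	fmt.Println("result:", err)
    }

Patch 06 below is the large merge of upstream rclone v1.53.0 into the fork's
modified drive backend.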
From c2333e2dcaddeea5fe93cf3bbb05689e0d2271dc Mon Sep 17 00:00:00 2001
From: bounty1342
Date: Sat, 12 Sep 2020 00:06:44 +0200
Subject: [PATCH 06/25] merge with rclone v1.53.0

---
 backend/drive/drive.go | 1272 ++++++++++++++++++++++++++++------------
 1 file changed, 904 insertions(+), 368 deletions(-)

diff --git a/backend/drive/drive.go b/backend/drive/drive.go
index 6caa79a..b4de709 100644
--- a/backend/drive/drive.go
+++ b/backend/drive/drive.go
@@ -15,21 +15,20 @@ import (
 	"io"
 	"io/ioutil"
 	"log"
-	"math/rand"
 	"mime"
 	"net/http"
-	"net/url"
-	"os"
 	"path"
 	"sort"
 	"strconv"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"text/template"
 	"time"
 
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/cache"
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
@@ -37,9 +36,11 @@ import (
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/fs/walk"
 	"github.com/rclone/rclone/lib/dircache"
 	"github.com/rclone/rclone/lib/encoder"
+	"github.com/rclone/rclone/lib/env"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/readers"
@@ -55,6 +56,8 @@ const (
 	rcloneClientID              = "202264815644.apps.googleusercontent.com"
 	rcloneEncryptedClientSecret = "eX8GpZTVx3vxMWVkuuBdDWmAUE6rGhTwVrvG9GhllYccSdj2-mvHVg"
 	driveFolderType             = "application/vnd.google-apps.folder"
+	shortcutMimeType            = "application/vnd.google-apps.shortcut"
+	shortcutMimeTypeDangling    = "application/vnd.google-apps.shortcut.dangling" // synthetic mime type for internal use
 	timeFormatIn                = time.RFC3339
 	timeFormatOut               = "2006-01-02T15:04:05.000000000Z07:00"
 	defaultMinSleep             = fs.Duration(100 * time.Millisecond)
@@ -66,7 +69,9 @@ const (
 	// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
minChunkSize = 256 * fs.KibiByte defaultChunkSize = 8 * fs.MebiByte - partialFields = "id,name,size,md5Checksum,trashed,modifiedTime,createdTime,mimeType,parents,webViewLink" + partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks" + listRGrouping = 50 // number of IDs to search at once when using ListR + listRInputBuffer = 1000 // size of input buffer when using ListR ) // Globals @@ -158,6 +163,7 @@ func init() { Name: "drive", Description: "Google Drive", NewFs: NewFs, + CommandHelp: commandHelp, Config: func(name string, m configmap.Mapper) { ctx := context.TODO() // Parse config into Options struct @@ -176,7 +182,7 @@ func init() { } if opt.ServiceAccountFile == "" { - err = oauthutil.Config("drive", name, m, driveConfig) + err = oauthutil.Config("drive", name, m, driveConfig, nil) if err != nil { log.Fatalf("Failed to configure token: %v", err) } @@ -186,13 +192,7 @@ func init() { log.Fatalf("Failed to configure team drive: %v", err) } }, - Options: []fs.Option{{ - Name: config.ConfigClientID, - Help: "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance.", - }, { - Name: config.ConfigClientSecret, - Help: "Google Application Client Secret\nSetting your own is recommended.", - }, { + Options: append(oauthutil.SharedOptions, []fs.Option{{ Name: "scope", Help: "Scope that rclone should use when requesting access from drive.", Examples: []fs.OptionExample{{ @@ -215,20 +215,13 @@ func init() { Name: "root_folder_id", Help: `ID of the root folder Leave blank normally. - Fill in to access "Computers" folders (see docs), or for rclone to use a non root folder as its starting point. - -Note that if this is blank, the first time rclone runs it will fill it -in with the ID of the root folder. `, }, { Name: "service_account_file", - Help: "Service Account Credentials JSON file path \nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.", + Help: "Service Account Credentials JSON file path \nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp, }, { - Name: "service_account_file_path", - Help: "Service Account Credentials JSON file path .\n", - },{ Name: "service_account_credentials", Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.", Hide: fs.OptionHideConfigurator, @@ -267,15 +260,11 @@ in with the ID of the root folder. Name: "skip_checksum_gphotos", Default: false, Help: `Skip MD5 checksum on Google photos and videos only. - Use this if you get checksum errors when transferring Google photos or videos. - Setting this flag will cause Google photos and videos to return a blank MD5 checksum. - -Google photos are identifed by being in the "photos" space. - +Google photos are identified by being in the "photos" space. Corrupted checksums are caused by Google modifying the image/video but not updating the checksum.`, Advanced: true, @@ -283,11 +272,9 @@ not updating the checksum.`, Name: "shared_with_me", Default: false, Help: `Only show files that are shared with me. - Instructs rclone to operate on your "Shared with me" folder (where Google Drive lets you access the files and folders others have shared with you). 
- This works both with the "list" (lsd, lsl, etc) and the "copy" commands (copy, sync, etc), and with all other commands too.`, Advanced: true, @@ -296,6 +283,11 @@ commands (copy, sync, etc), and with all other commands too.`, Default: false, Help: "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.", Advanced: true, + }, { + Name: "starred_only", + Default: false, + Help: "Only show files that are starred.", + Advanced: true, }, { Name: "formats", Default: "", @@ -321,34 +313,30 @@ commands (copy, sync, etc), and with all other commands too.`, Name: "use_created_date", Default: false, Help: `Use file created date instead of modified date., - Useful when downloading data and you want the creation date used in place of the last modified date. - **WARNING**: This flag may have some unexpected consequences. - When uploading to your drive all files will be overwritten unless they haven't been modified since their creation. And the inverse will occur while downloading. This side effect can be avoided by using the "--checksum" flag. - This feature was implemented to retain photos capture date as recorded by google photos. You will first need to check the "Create a Google Photos folder" option in your google drive settings. You can then copy or move the photos locally and use the date the image was taken (created) set as the modification date.`, Advanced: true, + Hide: fs.OptionHideConfigurator, }, { Name: "use_shared_date", Default: false, Help: `Use date file was shared instead of modified date. - Note that, as with "--drive-use-created-date", this flag may have unexpected consequences when uploading/downloading files. - If both this flag and "--drive-use-created-date" are set, the created date is used.`, Advanced: true, + Hide: fs.OptionHideConfigurator, }, { Name: "list_chunk", Default: 1000, @@ -357,22 +345,13 @@ date is used.`, }, { Name: "impersonate", Default: "", - Help: "Impersonate this user when using a service account.", + Help: `Impersonate this user when using a service account.`, Advanced: true, }, { Name: "alternate_export", Default: false, - Help: `Use alternate export URLs for google documents export., - -If this option is set this instructs rclone to use an alternate set of -export URLs for drive documents. Users have reported that the -official export URLs can't export large documents, whereas these -unofficial ones can. - -See rclone issue [#2243](https://github.com/rclone/rclone/issues/2243) for background, -[this google drive issue](https://issuetracker.google.com/issues/36761333) and -[this helpful post](https://www.labnol.org/internet/direct-links-for-google-drive/28356/).`, - Advanced: true, + Help: "Deprecated: no longer needed", + Hide: fs.OptionHideBoth, }, { Name: "upload_cutoff", Default: defaultChunkSize, @@ -382,17 +361,14 @@ See rclone issue [#2243](https://github.com/rclone/rclone/issues/2243) for backg Name: "chunk_size", Default: defaultChunkSize, Help: `Upload chunk size. Must a power of 2 >= 256k. - Making this larger will improve performance, but note that each chunk is buffered in memory one per transfer. - Reducing this will reduce memory usage but decrease performance.`, Advanced: true, }, { Name: "acknowledge_abuse", Default: false, Help: `Set to allow files which return cannotDownloadAbusiveFile to be downloaded. 
- If downloading a file returns the error "This file has been identified as malware or spam and cannot be downloaded" with the error code "cannotDownloadAbusiveFile" then supply this flag to rclone to @@ -408,20 +384,17 @@ will download it anyway.`, Name: "size_as_quota", Default: false, Help: `Show sizes as storage quota usage, not actual size. - -Show the size of a file as the the storage quota used. This is the +Show the size of a file as the storage quota used. This is the current version plus any older versions that have been set to keep forever. - **WARNING**: This flag may have some unexpected consequences. - It is not recommended to set this flag in your config - the recommended usage is using the flag form --drive-size-as-quota when doing rclone ls/lsl/lsf/lsjson/etc only. - If you do use this flag for syncing (not recommended) then you will need to use --ignore size also.`, Advanced: true, + Hide: fs.OptionHideConfigurator, }, { Name: "v2_download_min_size", Default: fs.SizeSuffix(-1), @@ -441,7 +414,6 @@ need to use --ignore size also.`, Name: "server_side_across_configs", Default: false, Help: `Allow server side operations (eg copy) to work across different drive configs. - This can be useful if you wish to do a server side copy between two different Google drives. Note that this isn't enabled by default because it isn't easy to tell if it will work between any two @@ -451,33 +423,36 @@ configurations.`, Name: "disable_http2", Default: true, Help: `Disable drive using http2 - There is currently an unsolved issue with the google drive backend and HTTP/2. HTTP/2 is therefore disabled by default for the drive backend but can be re-enabled here. When the issue is solved this flag will be removed. - See: https://github.com/rclone/rclone/issues/3631 - `, Advanced: true, }, { Name: "stop_on_upload_limit", Default: false, Help: `Make upload limit errors be fatal - At the time of writing it is only possible to upload 750GB of data to Google Drive a day (this is an undocumented limit). When this limit is reached Google Drive produces a slightly different error message. When this flag is set it causes these errors to be fatal. These will stop the in-progress sync. - Note that this detection is relying on error message strings which Google don't document so it may break in the future. - See: https://github.com/rclone/rclone/issues/3857 `, Advanced: true, + }, { + Name: "skip_shortcuts", + Help: `If set skip shortcut files +Normally rclone dereferences shortcut files making them appear as if +they are the original file (see [the shortcuts section](#shortcuts)). +If this flag is set then rclone will ignore shortcut files completely. +`, + Advanced: true, + Default: false, }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, @@ -485,7 +460,7 @@ See: https://github.com/rclone/rclone/issues/3857 // Encode invalid UTF-8 bytes as json doesn't handle them properly. // Don't encode / as it's a valid name character in drive. 
Default: encoder.EncodeInvalidUtf8, - }}, + }}...), }) // register duplicate MIME types first @@ -518,6 +493,7 @@ type Options struct { SkipChecksumGphotos bool `config:"skip_checksum_gphotos"` SharedWithMe bool `config:"shared_with_me"` TrashedOnly bool `config:"trashed_only"` + StarredOnly bool `config:"starred_only"` Extensions string `config:"formats"` ExportExtensions string `config:"export_formats"` ImportExtensions string `config:"import_formats"` @@ -526,7 +502,6 @@ type Options struct { UseSharedDate bool `config:"use_shared_date"` ListChunk int64 `config:"list_chunk"` Impersonate string `config:"impersonate"` - AlternateExport bool `config:"alternate_export"` UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` ChunkSize fs.SizeSuffix `config:"chunk_size"` AcknowledgeAbuse bool `config:"acknowledge_abuse"` @@ -538,6 +513,7 @@ type Options struct { ServerSideAcrossConfigs bool `config:"server_side_across_configs"` DisableHTTP2 bool `config:"disable_http2"` StopOnUploadLimit bool `config:"stop_on_upload_limit"` + SkipShortcuts bool `config:"skip_shortcuts"` Enc encoder.MultiEncoder `config:"encoding"` } @@ -556,12 +532,16 @@ type Fs struct { exportExtensions []string // preferred extensions to download docs importMimeTypes []string // MIME types to convert to docs isTeamDrive bool // true if this is a team drive + fileFields googleapi.Field // fields to fetch file info with + m configmap.Mapper + grouping int32 // number of IDs to search at once in ListR - read with atomic + listRmu *sync.Mutex // protects listRempties + listRempties map[string]struct{} // IDs of supposedly empty directories which triggered grouping disable //------------------------------------------------------------ ServiceAccountFiles map[string]int waitChangeSvc sync.Mutex FileObj *fs.Object FileName string - } type baseObject struct { @@ -571,6 +551,7 @@ type baseObject struct { modifiedDate string // RFC3339 time it was last modified mimeType string // The object MIME type bytes int64 // size of the object + parents int // number of parents } type documentObject struct { baseObject @@ -643,6 +624,9 @@ func (f *Fs) shouldRetry(err error) (bool, error) { return false, fserrors.FatalError(err) } return true, err + } else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" { + fs.Errorf(f, "Received team drive file limit error: %v", err) + return false, fserrors.FatalError(err) } } } @@ -737,17 +721,21 @@ func containsString(slice []string, s string) bool { return false } -// getRootID returns the canonical ID for the "root" ID -func (f *Fs) getRootID() (string, error) { - var info *drive.File - var err error +// getFile returns drive.File for the ID passed and fields passed in +func (f *Fs) getFile(ID string, fields googleapi.Field) (info *drive.File, err error) { err = f.pacer.CallNoRetry(func() (bool, error) { - info, err = f.svc.Files.Get("root"). - Fields("id"). + info, err = f.svc.Files.Get(ID). + Fields(fields). SupportsAllDrives(true). 
Do() return f.shouldRetry(err) }) + return info, err +} + +// getRootID returns the canonical ID for the "root" ID +func (f *Fs) getRootID() (string, error) { + info, err := f.getFile("root", "id") if err != nil { return "", errors.Wrap(err, "couldn't find root directory ID") } @@ -760,7 +748,6 @@ func (f *Fs) getRootID() (string, error) { // // Search params: https://developers.google.com/drive/search-parameters func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directoriesOnly, filesOnly, includeAll bool, fn listFn) (found bool, err error) { - //f.changeSvc() var query []string if !includeAll { q := "trashed=" + strconv.FormatBool(f.opt.TrashedOnly) @@ -769,6 +756,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie } query = append(query, q) } + // Search with sharedWithMe will always return things listed in "Shared With Me" (without any parents) // We must not filter with parent when we try list "ROOT" with drive-shared-with-me // If we need to list file inside those shared folders, we must search it without sharedWithMe @@ -780,8 +768,16 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie if parentsQuery.Len() > 1 { _, _ = parentsQuery.WriteString(" or ") } - if f.opt.SharedWithMe && dirID == f.rootFolderID { - _, _ = parentsQuery.WriteString("sharedWithMe=true") + if (f.opt.SharedWithMe || f.opt.StarredOnly) && dirID == f.rootFolderID { + if f.opt.SharedWithMe { + _, _ = parentsQuery.WriteString("sharedWithMe=true") + } + if f.opt.StarredOnly { + if f.opt.SharedWithMe { + _, _ = parentsQuery.WriteString(" and ") + } + _, _ = parentsQuery.WriteString("starred=true") + } } else { _, _ = fmt.Fprintf(parentsQuery, "'%s' in parents", dirID) } @@ -814,7 +810,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie query = append(query, titleQuery.String()) } if directoriesOnly { - query = append(query, fmt.Sprintf("mimeType='%s'", driveFolderType)) + query = append(query, fmt.Sprintf("(mimeType='%s' or mimeType='%s')", driveFolderType, shortcutMimeType)) } if filesOnly { query = append(query, fmt.Sprintf("mimeType!='%s'", driveFolderType)) @@ -838,23 +834,8 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie list.Spaces("appDataFolder") } - var fields = partialFields + fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.fileFields) - if f.opt.AuthOwnerOnly { - fields += ",owners" - } - if f.opt.UseSharedDate { - fields += ",sharedWithMeTime" - } - if f.opt.SkipChecksumGphotos { - fields += ",spaces" - } - if f.opt.SizeAsQuota { - fields += ",quotaBytesUsed" - } - - fields = fmt.Sprintf("files(%s),nextPageToken", fields) - OUTER: for { var files *drive.FileList @@ -865,8 +846,29 @@ OUTER: if err != nil { return false, errors.Wrap(err, "couldn't list directory") } + if files.IncompleteSearch { + fs.Errorf(f, "search result INCOMPLETE") + } for _, item := range files.Files { item.Name = f.opt.Enc.ToStandardName(item.Name) + if isShortcut(item) { + // ignore shortcuts if directed + if f.opt.SkipShortcuts { + continue + } + // skip file shortcuts if directory only + if directoriesOnly && item.ShortcutDetails.TargetMimeType != driveFolderType { + continue + } + // skip directory shortcuts if file only + if filesOnly && item.ShortcutDetails.TargetMimeType == driveFolderType { + continue + } + item, err = f.resolveShortcut(item) + if err != nil { + return false, errors.Wrap(err, "list") + } + } // Check the case of items is correct since 
// the `=` operator is case insensitive. if title != "" && title != item.Name { @@ -985,55 +987,32 @@ func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name if !config.Confirm(false) { return nil } - client, err := createOAuthClient(opt, name, m) + f, err := newFs(name, "", m) if err != nil { - return errors.Wrap(err, "config team drive failed to create oauth client") - } - svc, err := drive.New(client) - if err != nil { - return errors.Wrap(err, "config team drive failed to make drive client") + return errors.Wrap(err, "failed to make Fs to list teamdrives") } fmt.Printf("Fetching team drive list...\n") - var driveIDs, driveNames []string - listTeamDrives := svc.Teamdrives.List().PageSize(100) - listFailed := false - var defaultFs Fs // default Fs with default Options - for { - var teamDrives *drive.TeamDriveList - err = newPacer(opt).Call(func() (bool, error) { - teamDrives, err = listTeamDrives.Context(ctx).Do() - return defaultFs.shouldRetry(err) - }) - if err != nil { - fmt.Printf("Listing team drives failed: %v\n", err) - listFailed = true - break - } - for _, drive := range teamDrives.TeamDrives { - driveIDs = append(driveIDs, drive.Id) - driveNames = append(driveNames, drive.Name) - } - if teamDrives.NextPageToken == "" { - break - } - listTeamDrives.PageToken(teamDrives.NextPageToken) + teamDrives, err := f.listTeamDrives(ctx) + if err != nil { + return err } - var driveID string - if !listFailed && len(driveIDs) == 0 { + if len(teamDrives) == 0 { fmt.Printf("No team drives found in your account") - } else { - driveID = config.Choose("Enter a Team Drive ID", driveIDs, driveNames, true) + return nil } + var driveIDs, driveNames []string + for _, teamDrive := range teamDrives { + driveIDs = append(driveIDs, teamDrive.Id) + driveNames = append(driveNames, teamDrive.Name) + } + driveID := config.Choose("Enter a Team Drive ID", driveIDs, driveNames, true) m.Set("team_drive", driveID) + m.Set("root_folder_id", "") opt.TeamDriveID = driveID + opt.RootFolderID = "" return nil } -// newPacer makes a pacer configured for drive -func newPacer(opt *Options) *fs.Pacer { - return fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst))) -} - // getClient makes an http client according to the options func getClient(opt *Options) *http.Client { t := fshttp.NewTransportCustom(fs.Config, func(t *http.Transport) { @@ -1065,7 +1044,7 @@ func createOAuthClient(opt *Options, name string, m configmap.Mapper) (*http.Cli // try loading service account credentials from env variable, then from a file if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" { - loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile)) + loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile)) if err != nil { return nil, errors.Wrap(err, "error opening service account credentials file") } @@ -1116,9 +1095,11 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { return } -// NewFs constructs an Fs from the path, container:path -func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) { - ctx := context.Background() +// newFs partially constructs Fs from the path +// +// It constructs a valid Fs but doesn't attempt to figure out whether +// it is a file or a directory. 
+func newFs(name, path string, m configmap.Mapper) (*Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) @@ -1166,12 +1147,17 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) { } f := &Fs{ - name: name, - root: root, - opt: *opt, - pacer: newPacer(opt), + name: name, + root: root, + opt: *opt, + pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst))), + m: m, + grouping: listRGrouping, + listRmu: new(sync.Mutex), + listRempties: make(map[string]struct{}), } f.isTeamDrive = opt.TeamDriveID != "" + f.fileFields = f.getFileFields() f.features = (&fs.Features{ DuplicateFiles: true, ReadMimeType: true, @@ -1194,15 +1180,26 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) { } } + return f, nil +} - // set root folder for a team drive or query the user root folder - if opt.RootFolderID != "" { - // override root folder if set or cached in the config - f.rootFolderID = opt.RootFolderID +// NewFs constructs an Fs from the path, container:path +func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) { + ctx := context.Background() + f, err := newFs(name, path, m) + if err != nil { + return nil, err + } + + // Set the root folder ID + if f.opt.RootFolderID != "" { + // use root_folder ID if set + f.rootFolderID = f.opt.RootFolderID } else if f.isTeamDrive { + // otherwise use team_drive if set f.rootFolderID = f.opt.TeamDriveID } else { - // Look up the root ID and cache it in the config + // otherwise look up the actual root ID rootID, err := f.getRootID() if err != nil { if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 { @@ -1214,24 +1211,24 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) { } } f.rootFolderID = rootID - m.Set("root_folder_id", rootID) + fs.Debugf(f, "root_folder_id = %q - save this in the config to speed up startup", rootID) } - f.dirCache = dircache.New(root, f.rootFolderID, f) + f.dirCache = dircache.New(f.root, f.rootFolderID, f) // Parse extensions - if opt.Extensions != "" { - if opt.ExportExtensions != defaultExportExtensions { + if f.opt.Extensions != "" { + if f.opt.ExportExtensions != defaultExportExtensions { return nil, errors.New("only one of 'formats' and 'export_formats' can be specified") } - opt.Extensions, opt.ExportExtensions = "", opt.Extensions + f.opt.Extensions, f.opt.ExportExtensions = "", f.opt.Extensions } - f.exportExtensions, _, err = parseExtensions(opt.ExportExtensions, defaultExportExtensions) + f.exportExtensions, _, err = parseExtensions(f.opt.ExportExtensions, defaultExportExtensions) if err != nil { return nil, err } - _, f.importMimeTypes, err = parseExtensions(opt.ImportExtensions) + _, f.importMimeTypes, err = parseExtensions(f.opt.ImportExtensions) if err != nil { return nil, err } @@ -1262,7 +1259,7 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) { err = f.dirCache.FindRoot(ctx, false) if err != nil { // Assume it is a file - newRoot, remote := dircache.SplitPath(root) + newRoot, remote := dircache.SplitPath(f.root) tempF := *f tempF.dirCache = dircache.New(newRoot, f.rootFolderID, &tempF) tempF.root = newRoot @@ -1306,10 +1303,29 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject { modifiedDate: modifiedDate, mimeType: info.MimeType, bytes: size, + parents: len(info.Parents), } } -// newRegularObject creates a fs.Object for a normal drive.File +// getFileFields gets the fields for a normal file Get or List +func 
(f *Fs) getFileFields() (fields googleapi.Field) { + fields = partialFields + if f.opt.AuthOwnerOnly { + fields += ",owners" + } + if f.opt.UseSharedDate { + fields += ",sharedWithMeTime" + } + if f.opt.SkipChecksumGphotos { + fields += ",spaces" + } + if f.opt.SizeAsQuota { + fields += ",quotaBytesUsed" + } + return fields +} + +// newRegularObject creates an fs.Object for a normal drive.File func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object { // wipe checksum if SkipChecksumGphotos and file is type Photo or Video if f.opt.SkipChecksumGphotos { @@ -1322,31 +1338,19 @@ func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object { } return &Object{ baseObject: f.newBaseObject(remote, info), - url: fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, info.Id), + url: fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, actualID(info.Id)), md5sum: strings.ToLower(info.Md5Checksum), v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize), } } -// newDocumentObject creates a fs.Object for a google docs drive.File +// newDocumentObject creates an fs.Object for a google docs drive.File func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) { mediaType, _, err := mime.ParseMediaType(exportMimeType) if err != nil { return nil, err } - url := fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, info.Id, url.QueryEscape(mediaType)) - if f.opt.AlternateExport { - switch info.MimeType { - case "application/vnd.google-apps.drawing": - url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", info.Id, extension[1:]) - case "application/vnd.google-apps.document": - url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", info.Id, extension[1:]) - case "application/vnd.google-apps.spreadsheet": - url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", info.Id, extension[1:]) - case "application/vnd.google-apps.presentation": - url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", info.Id, extension[1:]) - } - } + url := info.ExportLinks[mediaType] baseObject := f.newBaseObject(remote+extension, info) baseObject.bytes = -1 baseObject.mimeType = exportMimeType @@ -1358,7 +1362,7 @@ func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, expor }, nil } -// newLinkObject creates a fs.Object that represents a link a google docs drive.File +// newLinkObject creates an fs.Object that represents a link a google docs drive.File func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) { t := linkTemplate(exportMimeType) if t == nil { @@ -1384,9 +1388,9 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim }, nil } -// newObjectWithInfo creates a fs.Object for any drive.File +// newObjectWithInfo creates an fs.Object for any drive.File // -// When the drive.File cannot be represented as a fs.Object it will return (nil, nil). +// When the drive.File cannot be represented as an fs.Object it will return (nil, nil). 
func (f *Fs) newObjectWithInfo(remote string, info *drive.File) (fs.Object, error) { // If item has MD5 sum or a length it is a file stored on drive if info.Md5Checksum != "" || info.Size > 0 { @@ -1397,28 +1401,46 @@ func (f *Fs) newObjectWithInfo(remote string, info *drive.File) (fs.Object, erro return f.newObjectWithExportInfo(remote, info, extension, exportName, exportMimeType, isDocument) } -// newObjectWithExportInfo creates a fs.Object for any drive.File and the result of findExportFormat +// newObjectWithExportInfo creates an fs.Object for any drive.File and the result of findExportFormat // -// When the drive.File cannot be represented as a fs.Object it will return (nil, nil). +// When the drive.File cannot be represented as an fs.Object it will return (nil, nil). func (f *Fs) newObjectWithExportInfo( remote string, info *drive.File, - extension, exportName, exportMimeType string, isDocument bool) (fs.Object, error) { + extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) { + // Note that resolveShortcut will have been called already if + // we are being called from a listing. However the drive.Item + // will have been resolved so this will do nothing. + info, err = f.resolveShortcut(info) + if err != nil { + return nil, errors.Wrap(err, "new object") + } switch { + case info.MimeType == driveFolderType: + return nil, fs.ErrorNotAFile + case info.MimeType == shortcutMimeType: + // We can only get here if f.opt.SkipShortcuts is set + // and not from a listing. This is unlikely. + fs.Debugf(remote, "Ignoring shortcut as skip shortcuts is set") + return nil, fs.ErrorObjectNotFound + case info.MimeType == shortcutMimeTypeDangling: + // Pretend a dangling shortcut is a regular object + // It will error if used, but appear in listings so it can be deleted + return f.newRegularObject(remote, info), nil case info.Md5Checksum != "" || info.Size > 0: // If item has MD5 sum or a length it is a file stored on drive return f.newRegularObject(remote, info), nil case f.opt.SkipGdocs: fs.Debugf(remote, "Skipping google document type %q", info.MimeType) - return nil, nil + return nil, fs.ErrorObjectNotFound default: // If item MimeType is in the ExportFormats then it is a google doc if !isDocument { fs.Debugf(remote, "Ignoring unknown document type %q", info.MimeType) - return nil, nil + return nil, fs.ErrorObjectNotFound } if extension == "" { fs.Debugf(remote, "No export formats found for %q", info.MimeType) - return nil, nil + return nil, fs.ErrorObjectNotFound } if isLinkMimeType(exportMimeType) { return f.newLinkObject(remote, info, extension, exportMimeType) @@ -1430,11 +1452,6 @@ func (f *Fs) newObjectWithExportInfo( // NewObject finds the Object at remote. If it can't be found // it returns the error fs.ErrorObjectNotFound. 
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { - //------------------------------------ - if(f.FileObj != nil){ - return *f.FileObj, nil - } - //------------------------------------- info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(ctx, remote) if err != nil { return nil, err @@ -1455,6 +1472,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { // FindLeaf finds a directory of name leaf in the folder with ID pathID func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { // Find the leaf in pathID + pathID = actualID(pathID) found, err = f.list(ctx, []string{pathID}, leaf, true, false, false, func(item *drive.File) bool { if !f.opt.SkipGdocs { _, exportName, _, isDocument := f.findExportFormat(item) @@ -1480,6 +1498,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, leaf = f.opt.Enc.FromStandardName(leaf) // fmt.Println("Making", path) // Define the metadata for the directory we are going to create. + pathID = actualID(pathID) createInfo := &drive.File{ Name: leaf, Description: leaf, @@ -1640,14 +1659,11 @@ func (f *Fs) findImportFormat(mimeType string) string { // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { - err = f.dirCache.FindRoot(ctx, false) - if err != nil { - return nil, err - } directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return nil, err } + directoryID = actualID(directoryID) var iErr error _, err = f.list(ctx, []string{directoryID}, "", false, false, false, func(item *drive.File) bool { @@ -1706,20 +1722,22 @@ func (s listRSlices) Less(i, j int) bool { return s.dirs[i] < s.dirs[j] } -// listRRunner will read dirIDs from the in channel, perform the file listing an call cb with each DirEntry. +// listRRunner will read dirIDs from the in channel, perform the file listing and call cb with each DirEntry. // // In each cycle it will read up to grouping entries from the in channel without blocking. // If an error occurs it will be send to the out channel and then return. Once the in channel is closed, // nil is send to the out channel and the function returns. 
-func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan listREntry, out chan<- error, cb func(fs.DirEntry) error, grouping int) { +func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listREntry, out chan<- error, cb func(fs.DirEntry) error, sendJob func(listREntry)) { var dirs []string var paths []string + var grouping int32 for dir := range in { dirs = append(dirs[:0], dir.id) paths = append(paths[:0], dir.path) + grouping = atomic.LoadInt32(&f.grouping) waitloop: - for i := 1; i < grouping; i++ { + for i := int32(1); i < grouping; i++ { select { case d, ok := <-in: if !ok { @@ -1732,9 +1750,16 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan list } listRSlices{dirs, paths}.Sort() var iErr error + foundItems := false _, err := f.list(ctx, dirs, "", false, false, false, func(item *drive.File) bool { + // shared with me items have no parents when at the root + if f.opt.SharedWithMe && len(item.Parents) == 0 && len(paths) == 1 && paths[0] == "" { + item.Parents = dirs + } for _, parent := range item.Parents { var i int + foundItems = true + earlyExit := false // If only one item in paths then no need to search for the ID // assuming google drive is doing its job properly. // @@ -1744,6 +1769,9 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan list // - shared with me items have no parents at the root // - if using a root alias, eg "root" or "appDataFolder" the ID won't match i = 0 + // items at root can have more than one parent so we need to put + // the item in just once. + earlyExit = true } else { // only handle parents that are in the requested dirs list if not at root i = sort.SearchStrings(dirs, parent) @@ -1763,9 +1791,54 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan list iErr = err return true } + + // If didn't check parents then insert only once + if earlyExit { + break + } } return false }) + // Found no items in more than one directory. Retry these as + // individual directories This is to work around a bug in google + // drive where (A in parents) or (B in parents) returns nothing + // sometimes. See #3114, #4289 and + // https://issuetracker.google.com/issues/149522397 + if len(dirs) > 1 && !foundItems { + if atomic.SwapInt32(&f.grouping, 1) != 1 { + fs.Debugf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs)) + } + f.listRmu.Lock() + for i := range dirs { + // Requeue the jobs + job := listREntry{id: dirs[i], path: paths[i]} + sendJob(job) + // Make a note of these dirs - if they all turn + // out to be empty then we can re-enable grouping + f.listRempties[dirs[i]] = struct{}{} + } + f.listRmu.Unlock() + fs.Debugf(f, "Recycled %d entries", len(dirs)) + } + // If using a grouping of 1 and dir was empty then check to see if it + // is part of the group that caused grouping to be disabled. 
+ if grouping == 1 && len(dirs) == 1 && !foundItems { + f.listRmu.Lock() + if _, found := f.listRempties[dirs[0]]; found { + // Remove the ID + delete(f.listRempties, dirs[0]) + // If no empties left => all the directories that + // triggered the grouping being set to 1 were actually + // empty so must have made a mistake + if len(f.listRempties) == 0 { + if atomic.SwapInt32(&f.grouping, listRGrouping) != listRGrouping { + fs.Debugf(f, "Re-enabling ListR as previous detection was in error") + } + } + } + f.listRmu.Unlock() + } + for range dirs { wg.Done() } @@ -1800,39 +1873,47 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan list // Don't implement this unless you have a more efficient way // of listing recursively that doing a directory traversal. func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { - const ( - grouping = 50 - inputBuffer = 1000 - ) - - err = f.dirCache.FindRoot(ctx, false) - if err != nil { - return err - } directoryID, err := f.dirCache.FindDir(ctx, dir, false) if err != nil { return err } + directoryID = actualID(directoryID) mu := sync.Mutex{} // protects in and overflow wg := sync.WaitGroup{} - in := make(chan listREntry, inputBuffer) + in := make(chan listREntry, listRInputBuffer) out := make(chan error, fs.Config.Checkers) list := walk.NewListRHelper(callback) overflow := []listREntry{} listed := 0 - cb := func(entry fs.DirEntry) error { + // Send a job to the input channel if not closed. If the job + // won't fit then queue it in the overflow slice. + // + // This will not block if the channel is full. + sendJob := func(job listREntry) { mu.Lock() defer mu.Unlock() - if d, isDir := entry.(*fs.Dir); isDir && in != nil { - select { - case in <- listREntry{d.ID(), d.Remote()}: - wg.Add(1) - default: - overflow = append(overflow, listREntry{d.ID(), d.Remote()}) - } + if in == nil { + return + } + wg.Add(1) + select { + case in <- job: + default: + overflow = append(overflow, job) + wg.Add(-1) } + } + + // Send the entry to the caller, queueing any directories as new jobs + cb := func(entry fs.DirEntry) error { + if d, isDir := entry.(*fs.Dir); isDir { + job := listREntry{actualID(d.ID()), d.Remote()} + sendJob(job) + } + mu.Lock() + defer mu.Unlock() listed++ return list.Add(entry) } @@ -1841,7 +1922,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) ( in <- listREntry{directoryID, dir} for i := 0; i < fs.Config.Checkers; i++ { - go f.listRRunner(ctx, &wg, in, out, cb, grouping) + go f.listRRunner(ctx, &wg, in, out, cb, sendJob) } go func() { // wait until the all directories are processed @@ -1850,9 +1931,9 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) ( for len(overflow) > 0 { mu.Lock() l := len(overflow) - // only fill half of the channel to prevent entries beeing put into overflow again - if l > inputBuffer/2 { - l = inputBuffer / 2 + // only fill half of the channel to prevent entries being put into overflow again + if l > listRInputBuffer/2 { + l = listRInputBuffer / 2 } wg.Add(l) for _, d := range overflow[:l] { @@ -1907,10 +1988,94 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) ( return nil } -// itemToDirEntry converts a drive.File to a fs.DirEntry. 
-// When the drive.File cannot be represented as a fs.DirEntry +const shortcutSeparator = '\t' + +// joinID adds an actual drive ID to the shortcut ID it came from +// +// directoryIDs in the dircache are these composite directory IDs so +// we must always unpack them before use. +func joinID(actual, shortcut string) string { + return actual + string(shortcutSeparator) + shortcut +} + +// splitID separates an actual ID and a shortcut ID from a composite +// ID. If there was no shortcut ID then it will return "" for it. +func splitID(compositeID string) (actualID, shortcutID string) { + i := strings.IndexRune(compositeID, shortcutSeparator) + if i < 0 { + return compositeID, "" + } + return compositeID[:i], compositeID[i+1:] +} + +// isShortcutID returns true if compositeID refers to a shortcut +func isShortcutID(compositeID string) bool { + return strings.IndexRune(compositeID, shortcutSeparator) >= 0 +} + +// actualID returns an actual ID from a composite ID +func actualID(compositeID string) (actualID string) { + actualID, _ = splitID(compositeID) + return actualID +} + +// shortcutID returns a shortcut ID from a composite ID if available, +// or the actual ID if not. +func shortcutID(compositeID string) (shortcutID string) { + actualID, shortcutID := splitID(compositeID) + if shortcutID != "" { + return shortcutID + } + return actualID +} + +// isShortcut returns true of the item is a shortcut +func isShortcut(item *drive.File) bool { + return item.MimeType == shortcutMimeType && item.ShortcutDetails != nil +} + +// Dereference shortcut if required. It returns the newItem (which may +// be just item). +// +// If we return a new item then the ID will be adjusted to be a +// composite of the actual ID and the shortcut ID. This is to make +// sure that we have decided in all use places what we are doing with +// the ID. +// +// Note that we assume shortcuts can't point to shortcuts. Google +// drive web interface doesn't offer the option to create a shortcut +// to a shortcut. The documentation is silent on the issue. +func (f *Fs) resolveShortcut(item *drive.File) (newItem *drive.File, err error) { + if f.opt.SkipShortcuts || item.MimeType != shortcutMimeType { + return item, nil + } + if item.ShortcutDetails == nil { + fs.Errorf(nil, "Expecting shortcutDetails in %v", item) + return item, nil + } + newItem, err = f.getFile(item.ShortcutDetails.TargetId, f.fileFields) + if err != nil { + if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 { + // 404 means dangling shortcut, so just return the shortcut with the mime type mangled + fs.Logf(nil, "Dangling shortcut %q detected", item.Name) + item.MimeType = shortcutMimeTypeDangling + return item, nil + } + return nil, errors.Wrap(err, "failed to resolve shortcut") + } + // make sure we use the Name, Parents and Trashed from the original item + newItem.Name = item.Name + newItem.Parents = item.Parents + newItem.Trashed = item.Trashed + // the new ID is a composite ID + newItem.Id = joinID(newItem.Id, item.Id) + return newItem, nil +} + +// itemToDirEntry converts a drive.File to an fs.DirEntry. +// When the drive.File cannot be represented as an fs.DirEntry // (nil, nil) is returned. 
-func (f *Fs) itemToDirEntry(remote string, item *drive.File) (fs.DirEntry, error) { +func (f *Fs) itemToDirEntry(remote string, item *drive.File) (entry fs.DirEntry, err error) { switch { case item.MimeType == driveFolderType: // cache the directory ID for later lookups @@ -1921,7 +2086,11 @@ func (f *Fs) itemToDirEntry(remote string, item *drive.File) (fs.DirEntry, error case f.opt.AuthOwnerOnly && !isAuthOwned(item): // ignore object default: - return f.newObjectWithInfo(remote, item) + entry, err = f.newObjectWithInfo(remote, item) + if err == fs.ErrorObjectNotFound { + return nil, nil + } + return entry, err } return nil, nil } @@ -1930,10 +2099,11 @@ func (f *Fs) itemToDirEntry(remote string, item *drive.File) (fs.DirEntry, error // // Used to create new objects func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Time) (*drive.File, error) { - leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true) + leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true) if err != nil { return nil, err } + directoryID = actualID(directoryID) leaf = f.opt.Enc.FromStandardName(leaf) // Define the metadata for the file we are going to create. @@ -2037,12 +2207,24 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, // MergeDirs merges the contents of all the directories passed // in into the first one and rmdirs the other directories. func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error { + if len(dirs) < 2 { + return nil + } + newDirs := dirs[:0] + for _, dir := range dirs { + if isShortcutID(dir.ID()) { + fs.Infof(dir, "skipping shortcut directory") + continue + } + newDirs = append(newDirs, dir) + } + dirs = newDirs if len(dirs) < 2 { return nil } dstDir := dirs[0] for _, srcDir := range dirs[1:] { - // list the the objects + // list the objects infos := []*drive.File{} _, err := f.list(ctx, []string{srcDir.ID()}, "", false, false, true, func(info *drive.File) bool { infos = append(infos, info) @@ -2070,7 +2252,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error { } // rmdir (into trash) the now empty source directory fs.Infof(srcDir, "removing empty directory") - err = f.rmdir(ctx, srcDir.ID(), true) + err = f.delete(ctx, srcDir.ID(), true) if err != nil { return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir) } @@ -2080,30 +2262,24 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error { // Mkdir creates the container if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) error { - err := f.dirCache.FindRoot(ctx, true) - if err != nil { - return err - } - if dir != "" { - _, err = f.dirCache.FindDir(ctx, dir, true) - } + _, err := f.dirCache.FindDir(ctx, dir, true) return err } -// Rmdir deletes a directory unconditionally by ID -func (f *Fs) rmdir(ctx context.Context, directoryID string, useTrash bool) error { +// delete a file or directory unconditionally by ID +func (f *Fs) delete(ctx context.Context, id string, useTrash bool) error { return f.pacer.Call(func() (bool, error) { var err error if useTrash { info := drive.File{ Trashed: true, } - _, err = f.svc.Files.Update(directoryID, &info). + _, err = f.svc.Files.Update(id, &info). Fields(""). SupportsAllDrives(true). Do() } else { - err = f.svc.Files.Delete(directoryID). + err = f.svc.Files.Delete(id). Fields(""). SupportsAllDrives(true). 
Do() @@ -2112,40 +2288,48 @@ func (f *Fs) rmdir(ctx context.Context, directoryID string, useTrash bool) error }) } -// Rmdir deletes a directory -// -// Returns an error if it isn't empty -func (f *Fs) Rmdir(ctx context.Context, dir string) error { +// purgeCheck removes the dir directory, if check is set then it +// refuses to do so if it has anything in +func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { root := path.Join(f.root, dir) dc := f.dirCache directoryID, err := dc.FindDir(ctx, dir, false) if err != nil { return err } + directoryID, shortcutID := splitID(directoryID) + // if directory is a shortcut remove it regardless + if shortcutID != "" { + return f.delete(ctx, shortcutID, f.opt.UseTrash) + } var trashedFiles = false - found, err := f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool { - if !item.Trashed { - fs.Debugf(dir, "Rmdir: contains file: %q", item.Name) - return true + if check { + found, err := f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool { + if !item.Trashed { + fs.Debugf(dir, "Rmdir: contains file: %q", item.Name) + return true + } + fs.Debugf(dir, "Rmdir: contains trashed file: %q", item.Name) + trashedFiles = true + return false + }) + if err != nil { + return err + } + if found { + return errors.Errorf("directory not empty") } - fs.Debugf(dir, "Rmdir: contains trashed file: %q", item.Name) - trashedFiles = true - return false - }) - if err != nil { - return err - } - if found { - return errors.Errorf("directory not empty") } if root != "" { // trash the directory if it had trashed files // in or the user wants to trash, otherwise // delete it. - err = f.rmdir(ctx, directoryID, trashedFiles || f.opt.UseTrash) + err = f.delete(ctx, directoryID, trashedFiles || f.opt.UseTrash) if err != nil { return err } + } else if check { + return errors.New("can't purge root directory") } f.dirCache.FlushDir(dir) if err != nil { @@ -2154,6 +2338,13 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error { return nil } +// Rmdir deletes a directory +// +// Returns an error if it isn't empty +func (f *Fs) Rmdir(ctx context.Context, dir string) error { + return f.purgeCheck(ctx, dir, true) +} + // Precision of the object storage system func (f *Fs) Precision() time.Duration { return time.Millisecond @@ -2171,11 +2362,13 @@ func (f *Fs) Precision() time.Duration { func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { var srcObj *baseObject ext := "" + isDoc := false switch src := src.(type) { case *Object: srcObj = &src.baseObject case *documentObject: srcObj, ext = &src.baseObject, src.ext() + isDoc = true case *linkObject: srcObj, ext = &src.baseObject, src.ext() default: @@ -2183,6 +2376,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, return nil, fs.ErrorCantCopy } + // Look to see if there is an existing object before we remove + // the extension from the remote + existingObject, _ := f.NewObject(ctx, remote) + + // Adjust the remote name to be without the extension if we + // are about to create a doc. 
if ext != "" { if !strings.HasSuffix(remote, ext) { fs.Debugf(src, "Can't copy - not same document type") @@ -2191,17 +2390,30 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, remote = remote[:len(remote)-len(ext)] } - // Look to see if there is an existing object - existingObject, _ := f.NewObject(ctx, remote) - createInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx)) if err != nil { return nil, err } + if isDoc { + // preserve the description on copy for docs + info, err := f.getFile(actualID(srcObj.id), "description") + if err != nil { + return nil, errors.Wrap(err, "failed to read description for Google Doc") + } + createInfo.Description = info.Description + } else { + // don't overwrite the description on copy for files + // this should work for docs but it doesn't - it is probably a bug in Google Drive + createInfo.Description = "" + } + + // get the ID of the thing to copy - this is the shortcut if available + id := shortcutID(srcObj.id) + var info *drive.File err = f.pacer.Call(func() (bool, error) { - info, err = f.svc.Files.Copy(srcObj.id, createInfo). + info, err = f.svc.Files.Copy(id, createInfo). Fields(partialFields). SupportsAllDrives(true). KeepRevisionForever(f.opt.KeepRevisionForever). @@ -2215,6 +2427,22 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, if err != nil { return nil, err } + // Google docs aren't preserving their mod time after copy, so set them explicitly + // See: https://github.com/rclone/rclone/issues/4517 + // + // FIXME remove this when google fixes the problem! + if isDoc { + // A short sleep is needed here in order to make the + // change effective, without it is is ignored. This is + // probably some eventual consistency nastiness. + sleepTime := 2 * time.Second + fs.Debugf(f, "Sleeping for %v before setting the modtime to work around drive bug - see #4517", sleepTime) + time.Sleep(sleepTime) + err = newObject.SetModTime(ctx, src.ModTime(ctx)) + if err != nil { + return nil, err + } + } if existingObject != nil { err = existingObject.Remove(ctx) if err != nil { @@ -2229,39 +2457,11 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() -func (f *Fs) Purge(ctx context.Context) error { - if f.root == "" { - return errors.New("can't purge root directory") - } +func (f *Fs) Purge(ctx context.Context, dir string) error { if f.opt.TrashedOnly { return errors.New("Can't purge with --drive-trashed-only. Use delete if you want to selectively delete files") } - err := f.dirCache.FindRoot(ctx, false) - if err != nil { - return err - } - err = f.pacer.Call(func() (bool, error) { - if f.opt.UseTrash { - info := drive.File{ - Trashed: true, - } - _, err = f.svc.Files.Update(f.dirCache.RootID(), &info). - Fields(""). - SupportsAllDrives(true). - Do() - } else { - err = f.svc.Files.Delete(f.dirCache.RootID()). - Fields(""). - SupportsAllDrives(true). 
- Do() - } - return f.shouldRetry(err) - }) - f.dirCache.ResetRoot() - if err != nil { - return err - } - return nil + return f.purgeCheck(ctx, dir, false) } // CleanUp empties the trash @@ -2362,6 +2562,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, if err != nil { return nil, err } + srcParentID = actualID(srcParentID) // Temporary Object under construction dstInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx)) @@ -2374,7 +2575,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, // Do the move var info *drive.File err = f.pacer.Call(func() (bool, error) { - info, err = f.svc.Files.Update(srcObj.id, dstInfo). + info, err = f.svc.Files.Update(shortcutID(srcObj.id), dstInfo). RemoveParents(srcParentID). AddParents(dstParents). Fields(partialFields). @@ -2390,17 +2591,18 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, } // PublicLink adds a "readable by anyone with link" permission on the given file or folder. -func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) { +func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) { id, err := f.dirCache.FindDir(ctx, remote, false) if err == nil { fs.Debugf(f, "attempting to share directory '%s'", remote) + id = shortcutID(id) } else { fs.Debugf(f, "attempting to share single file '%s'", remote) o, err := f.NewObject(ctx, remote) if err != nil { return "", err } - id = o.(fs.IDer).ID() + id = shortcutID(o.(fs.IDer).ID()) } permission := &drive.Permission{ @@ -2438,79 +2640,22 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string fs.Debugf(srcFs, "Can't move directory - not same remote type") return fs.ErrorCantDirMove } - srcPath := path.Join(srcFs.root, srcRemote) - dstPath := path.Join(f.root, dstRemote) - - // Refuse to move to or from the root - if srcPath == "" || dstPath == "" { - fs.Debugf(src, "DirMove error: Can't move root") - return errors.New("can't move root directory") - } - // find the root src directory - err := srcFs.dirCache.FindRoot(ctx, false) + srcID, srcDirectoryID, srcLeaf, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote) if err != nil { return err } + _ = srcLeaf - // find the root dst directory - if dstRemote != "" { - err = f.dirCache.FindRoot(ctx, true) - if err != nil { - return err - } - } else { - if f.dirCache.FoundRoot() { - return fs.ErrorDirExists - } - } - - // Find ID of dst parent, creating subdirs if necessary - var leaf, dstDirectoryID string - findPath := dstRemote - if dstRemote == "" { - findPath = f.root - } - leaf, dstDirectoryID, err = f.dirCache.FindPath(ctx, findPath, true) - if err != nil { - return err - } - - // Check destination does not exist - if dstRemote != "" { - _, err = f.dirCache.FindDir(ctx, dstRemote, false) - if err == fs.ErrorDirNotFound { - // OK - } else if err != nil { - return err - } else { - return fs.ErrorDirExists - } - } - - // Find ID of src parent - var srcDirectoryID string - if srcRemote == "" { - srcDirectoryID, err = srcFs.dirCache.RootParentID() - } else { - _, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, srcRemote, false) - } - if err != nil { - return err - } - - // Find ID of src - srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false) - if err != nil { - return err - } + dstDirectoryID = actualID(dstDirectoryID) + srcDirectoryID = actualID(srcDirectoryID) // 
Do the move patch := drive.File{ - Name: leaf, + Name: dstLeaf, } err = f.pacer.Call(func() (bool, error) { - _, err = f.svc.Files.Update(srcID, &patch). + _, err = f.svc.Files.Update(shortcutID(srcID), &patch). RemoveParents(srcDirectoryID). AddParents(dstDirectoryID). Fields(""). @@ -2687,6 +2832,387 @@ func (f *Fs) Hashes() hash.Set { return hash.Set(hash.MD5) } +func (f *Fs) changeChunkSize(chunkSizeString string) (err error) { + chunkSizeInt, err := strconv.ParseInt(chunkSizeString, 10, 64) + if err != nil { + return errors.Wrap(err, "couldn't convert chunk size to int") + } + chunkSize := fs.SizeSuffix(chunkSizeInt) + if chunkSize == f.opt.ChunkSize { + return nil + } + err = checkUploadChunkSize(chunkSize) + if err == nil { + f.opt.ChunkSize = chunkSize + } + return err +} + +func (f *Fs) changeServiceAccountFile(file string) (err error) { + fs.Debugf(nil, "Changing Service Account File from %s to %s", f.opt.ServiceAccountFile, file) + if file == f.opt.ServiceAccountFile { + return nil + } + oldSvc := f.svc + oldv2Svc := f.v2Svc + oldOAuthClient := f.client + oldFile := f.opt.ServiceAccountFile + oldCredentials := f.opt.ServiceAccountCredentials + defer func() { + // Undo all the changes instead of doing selective undo's + if err != nil { + f.svc = oldSvc + f.v2Svc = oldv2Svc + f.client = oldOAuthClient + f.opt.ServiceAccountFile = oldFile + f.opt.ServiceAccountCredentials = oldCredentials + } + }() + f.opt.ServiceAccountFile = file + f.opt.ServiceAccountCredentials = "" + oAuthClient, err := createOAuthClient(&f.opt, f.name, f.m) + if err != nil { + return errors.Wrap(err, "drive: failed when making oauth client") + } + f.client = oAuthClient + f.svc, err = drive.New(f.client) + if err != nil { + return errors.Wrap(err, "couldn't create Drive client") + } + if f.opt.V2DownloadMinSize >= 0 { + f.v2Svc, err = drive_v2.New(f.client) + if err != nil { + return errors.Wrap(err, "couldn't create Drive v2 client") + } + } + return nil +} + +// Create a shortcut from (f, srcPath) to (dstFs, dstPath) +// +// Will not overwrite existing files +func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPath string) (o fs.Object, err error) { + srcFs := f + srcPath = strings.Trim(srcPath, "/") + dstPath = strings.Trim(dstPath, "/") + if dstPath == "" { + return nil, errors.New("shortcut destination can't be root directory") + } + + // Find source + var srcID string + isDir := false + if srcPath == "" { + // source is root directory + srcID, err = f.dirCache.RootID(ctx, false) + if err != nil { + return nil, err + } + isDir = true + } else if srcObj, err := srcFs.NewObject(ctx, srcPath); err != nil { + if err != fs.ErrorNotAFile { + return nil, errors.Wrap(err, "can't find source") + } + // source was a directory + srcID, err = srcFs.dirCache.FindDir(ctx, srcPath, false) + if err != nil { + return nil, errors.Wrap(err, "failed to find source dir") + } + isDir = true + } else { + // source was a file + srcID = srcObj.(*Object).id + } + srcID = actualID(srcID) // link to underlying object not to shortcut + + // Find destination + _, err = dstFs.NewObject(ctx, dstPath) + if err != fs.ErrorObjectNotFound { + if err == nil { + err = errors.New("existing file") + } else if err == fs.ErrorNotAFile { + err = errors.New("existing directory") + } + return nil, errors.Wrap(err, "not overwriting shortcut target") + } + + // Create destination shortcut + createInfo, err := dstFs.createFileInfo(ctx, dstPath, time.Now()) + if err != nil { + return nil, errors.Wrap(err, "shortcut destination 
failed") + } + createInfo.MimeType = shortcutMimeType + createInfo.ShortcutDetails = &drive.FileShortcutDetails{ + TargetId: srcID, + } + + var info *drive.File + err = dstFs.pacer.CallNoRetry(func() (bool, error) { + info, err = dstFs.svc.Files.Create(createInfo). + Fields(partialFields). + SupportsAllDrives(true). + KeepRevisionForever(dstFs.opt.KeepRevisionForever). + Do() + return dstFs.shouldRetry(err) + }) + if err != nil { + return nil, errors.Wrap(err, "shortcut creation failed") + } + if isDir { + return nil, nil + } + return dstFs.newObjectWithInfo(dstPath, info) +} + +// List all team drives +func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.TeamDrive, err error) { + drives = []*drive.TeamDrive{} + listTeamDrives := f.svc.Teamdrives.List().PageSize(100) + var defaultFs Fs // default Fs with default Options + for { + var teamDrives *drive.TeamDriveList + err = f.pacer.Call(func() (bool, error) { + teamDrives, err = listTeamDrives.Context(ctx).Do() + return defaultFs.shouldRetry(err) + }) + if err != nil { + return drives, errors.Wrap(err, "listing team drives failed") + } + drives = append(drives, teamDrives.TeamDrives...) + if teamDrives.NextPageToken == "" { + break + } + listTeamDrives.PageToken(teamDrives.NextPageToken) + } + return drives, nil +} + +type unTrashResult struct { + Untrashed int + Errors int +} + +func (r unTrashResult) Error() string { + return fmt.Sprintf("%d errors while untrashing - see log", r.Errors) +} + +// Restore the trashed files from dir, directoryID recursing if needed +func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurse bool) (r unTrashResult, err error) { + directoryID = actualID(directoryID) + fs.Debugf(dir, "finding trash to restore in directory %q", directoryID) + _, err = f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool { + remote := path.Join(dir, item.Name) + if item.ExplicitlyTrashed { + fs.Infof(remote, "restoring %q", item.Id) + if operations.SkipDestructive(ctx, remote, "restore") { + return false + } + update := drive.File{ + ForceSendFields: []string{"Trashed"}, // necessary to set false value + Trashed: false, + } + err := f.pacer.Call(func() (bool, error) { + _, err := f.svc.Files.Update(item.Id, &update). + SupportsAllDrives(true). + Fields("trashed"). 
+ Do() + return f.shouldRetry(err) + }) + if err != nil { + err = errors.Wrap(err, "failed to restore") + r.Errors++ + fs.Errorf(remote, "%v", err) + } else { + r.Untrashed++ + } + } + if recurse && item.MimeType == "application/vnd.google-apps.folder" { + if !isShortcutID(item.Id) { + rNew, _ := f.unTrash(ctx, remote, item.Id, recurse) + r.Untrashed += rNew.Untrashed + r.Errors += rNew.Errors + } + } + return false + }) + if err != nil { + err = errors.Wrap(err, "failed to list directory") + r.Errors++ + fs.Errorf(dir, "%v", err) + } + if r.Errors != 0 { + return r, r + } + return r, nil +} + +// Untrash dir +func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTrashResult, err error) { + directoryID, err := f.dirCache.FindDir(ctx, dir, false) + if err != nil { + r.Errors++ + return r, err + } + return f.unTrash(ctx, dir, directoryID, true) +} + +var commandHelp = []fs.CommandHelp{{ + Name: "get", + Short: "Get command for fetching the drive config parameters", + Long: `This is a get command which will be used to fetch the various drive config parameters +Usage Examples: + rclone backend get drive: [-o service_account_file] [-o chunk_size] + rclone rc backend/command command=get fs=drive: [-o service_account_file] [-o chunk_size] +`, + Opts: map[string]string{ + "chunk_size": "show the current upload chunk size", + "service_account_file": "show the current service account file", + }, +}, { + Name: "set", + Short: "Set command for updating the drive config parameters", + Long: `This is a set command which will be used to update the various drive config parameters +Usage Examples: + rclone backend set drive: [-o service_account_file=sa.json] [-o chunk_size=67108864] + rclone rc backend/command command=set fs=drive: [-o service_account_file=sa.json] [-o chunk_size=67108864] +`, + Opts: map[string]string{ + "chunk_size": "update the current upload chunk size", + "service_account_file": "update the current service account file", + }, +}, { + Name: "shortcut", + Short: "Create shortcuts from files or directories", + Long: `This command creates shortcuts from files or directories. +Usage: + rclone backend shortcut drive: source_item destination_shortcut + rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut +In the first example this creates a shortcut from the "source_item" +which can be a file or a directory to the "destination_shortcut". The +"source_item" and the "destination_shortcut" should be relative paths +from "drive:" +In the second example this creates a shortcut from the "source_item" +relative to "drive:" to the "destination_shortcut" relative to +"drive2:". This may fail with a permission error if the user +authenticated with "drive2:" can't read files from "drive:". +`, + Opts: map[string]string{ + "target": "optional target remote for the shortcut destination", + }, +}, { + Name: "drives", + Short: "List the shared drives available to this account", + Long: `This command lists the shared drives (teamdrives) available to this +account. +Usage: + rclone backend drives drive: +This will return a JSON list of objects like this + [ + { + "id": "0ABCDEF-01234567890", + "kind": "drive#teamDrive", + "name": "My Drive" + }, + { + "id": "0ABCDEFabcdefghijkl", + "kind": "drive#teamDrive", + "name": "Test Drive" + } + ] +`, +}, { + Name: "untrash", + Short: "Untrash files and directories", + Long: `This command untrashes all the files and directories in the directory +passed in recursively. 
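One small Go idiom in the untrash machinery above is worth calling out: unTrashResult carries an Error() string method, so the very same struct serves both as the JSON-encodable command output and, when r.Errors != 0, as the returned error (the "return r, r" line). A self-contained illustration of the pattern, standard library only:

package sketch

import "fmt"

// result doubles as command output and as an error value.
type result struct {
    Untrashed int
    Errors    int
}

// Error satisfies the error interface, enabling "return r, r".
func (r result) Error() string {
    return fmt.Sprintf("%d errors while untrashing - see log", r.Errors)
}

// finish mirrors the tail of unTrash in the patch.
func finish(r result) (result, error) {
    if r.Errors != 0 {
        return r, r
    }
    return r, nil
}
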
+Usage: +This takes an optional directory to untrash, which makes it easier to use via the API. + rclone backend untrash drive:directory + rclone backend -i untrash drive:directory subdir +Use the -i flag to see what would be restored before restoring it. +Result: + { + "Untrashed": 17, + "Errors": 0 + } +`, +}} + +// Command the backend to run a named command +// +// The command run is name +// args may be used to read arguments from +// opts may be used to read optional arguments from +// +// The result should be capable of being JSON encoded +// If it is a string or a []string it will be shown to the user +// otherwise it will be JSON encoded and shown to the user like that +func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) { + switch name { + case "get": + out := make(map[string]string) + if _, ok := opt["service_account_file"]; ok { + out["service_account_file"] = f.opt.ServiceAccountFile + } + if _, ok := opt["chunk_size"]; ok { + out["chunk_size"] = fmt.Sprintf("%s", f.opt.ChunkSize) + } + return out, nil + case "set": + out := make(map[string]map[string]string) + if serviceAccountFile, ok := opt["service_account_file"]; ok { + serviceAccountMap := make(map[string]string) + serviceAccountMap["previous"] = f.opt.ServiceAccountFile + if err = f.changeServiceAccountFile(serviceAccountFile); err != nil { + return out, err + } + f.m.Set("service_account_file", serviceAccountFile) + serviceAccountMap["current"] = f.opt.ServiceAccountFile + out["service_account_file"] = serviceAccountMap + } + if chunkSize, ok := opt["chunk_size"]; ok { + chunkSizeMap := make(map[string]string) + chunkSizeMap["previous"] = fmt.Sprintf("%s", f.opt.ChunkSize) + if err = f.changeChunkSize(chunkSize); err != nil { + return out, err + } + chunkSizeString := fmt.Sprintf("%s", f.opt.ChunkSize) + f.m.Set("chunk_size", chunkSizeString) + chunkSizeMap["current"] = chunkSizeString + out["chunk_size"] = chunkSizeMap + } + return out, nil + case "shortcut": + if len(arg) != 2 { + return nil, errors.New("need exactly 2 arguments") + } + dstFs := f + target, ok := opt["target"] + if ok { + targetFs, err := cache.Get(target) + if err != nil { + return nil, errors.Wrap(err, "couldn't find target") + } + dstFs, ok = targetFs.(*Fs) + if !ok { + return nil, errors.New("target is not a drive backend") + } + } + return f.makeShortcut(ctx, arg[0], dstFs, arg[1]) + case "drives": + return f.listTeamDrives(ctx) + case "untrash": + dir := "" + if len(arg) > 0 { + dir = arg[0] + } + return f.unTrashDir(ctx, dir, true) + default: + return nil, fs.ErrorCommandNotFound + } +} + // ------------------------------------------------------------ // Fs returns the parent Fs @@ -2740,15 +3266,16 @@ func (f *Fs) getRemoteInfo(ctx context.Context, remote string) (info *drive.File // getRemoteInfoWithExport returns a drive.File and the export settings for the remote func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) ( info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) { - leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false) + leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false) if err != nil { if err == fs.ErrorDirNotFound { return nil, "", "", "", false, fs.ErrorObjectNotFound } return nil, "", "", "", false, err } + directoryID = actualID(directoryID) - found, err := f.list(ctx, []string{directoryID}, leaf, false, true, false, func(item *drive.File) bool { + found, err := 
f.list(ctx, []string{directoryID}, leaf, false, false, false, func(item *drive.File) bool { if !f.opt.SkipGdocs { extension, exportName, exportMimeType, isDocument = f.findExportFormat(item) if exportName == leaf { @@ -2798,7 +3325,7 @@ func (o *baseObject) SetModTime(ctx context.Context, modTime time.Time) error { var info *drive.File err := o.fs.pacer.Call(func() (bool, error) { var err error - info, err = o.fs.svc.Files.Update(o.id, updateInfo). + info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo). Fields(partialFields). SupportsAllDrives(true). Do() @@ -2849,7 +3376,7 @@ func (o *baseObject) httpResponse(ctx context.Context, url, method string, optio return req, res, nil } -// openDocumentFile represents an documentObject open for reading. +// openDocumentFile represents a documentObject open for reading. // Updates the object size after read successfully. type openDocumentFile struct { o *documentObject // Object we are reading for @@ -2924,10 +3451,13 @@ func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOpt // Open an object for read func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { + if o.mimeType == shortcutMimeTypeDangling { + return nil, errors.New("can't read dangling shortcut") + } if o.v2Download { var v2File *drive_v2.File err = o.fs.pacer.Call(func() (bool, error) { - v2File, err = o.fs.v2Svc.Files.Get(o.id). + v2File, err = o.fs.v2Svc.Files.Get(actualID(o.id)). Fields("downloadUrl"). SupportsAllDrives(true). Do() @@ -3006,7 +3536,7 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM if size >= 0 && size < int64(o.fs.opt.UploadCutoff) { // Don't retry, return a retry error instead err = o.fs.pacer.CallNoRetry(func() (bool, error) { - info, err = o.fs.svc.Files.Update(o.id, updateInfo). + info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo). Media(in, googleapi.ContentType(uploadMimeType)). Fields(partialFields). SupportsAllDrives(true). @@ -3026,6 +3556,26 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM // // The new object may have been created if an error is returned func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { + // If o is a shortcut + if isShortcutID(o.id) { + // Delete it first + err := o.fs.delete(ctx, shortcutID(o.id), o.fs.opt.UseTrash) + if err != nil { + return err + } + // Then put the file as a new file + newObj, err := o.fs.PutUnchecked(ctx, in, src, options...) + if err != nil { + return err + } + // Update the object + if newO, ok := newObj.(*Object); ok { + *o = *newO + } else { + fs.Debugf(newObj, "Failed to update object %T from new object %T", o, newObj) + } + return nil + } srcMimeType := fs.MimeType(ctx, src) updateInfo := &drive.File{ MimeType: srcMimeType, @@ -3096,25 +3646,10 @@ func (o *linkObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo // Remove an object func (o *baseObject) Remove(ctx context.Context) error { - var err error - err = o.fs.pacer.Call(func() (bool, error) { - if o.fs.opt.UseTrash { - info := drive.File{ - Trashed: true, - } - _, err = o.fs.svc.Files.Update(o.id, &info). - Fields(""). - SupportsAllDrives(true). - Do() - } else { - err = o.fs.svc.Files.Delete(o.id). - Fields(""). - SupportsAllDrives(true). 
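The Update change above handles writes through a shortcut by replacement rather than in-place upload: the shortcut file is deleted (honoring use_trash), the data goes up as a brand new object, and the receiver is overwritten so callers keep a valid handle. A condensed sketch of that control flow; the three injected functions are hypothetical stand-ins for isShortcutID, f.delete and f.PutUnchecked:

package sketch

import "io"

// Object stands in for the backend object type.
type Object struct{ id string }

func updateViaShortcut(o *Object, in io.Reader,
    isShortcut func(string) bool,
    remove func(string) error,
    putNew func(io.Reader) (*Object, error)) (handled bool, err error) {
    if !isShortcut(o.id) {
        return false, nil // the normal in-place update path applies
    }
    // 1. drop the shortcut itself
    if err := remove(o.id); err != nil {
        return true, err
    }
    // 2. upload the payload as a fresh file
    newObj, err := putNew(in)
    if err != nil {
        return true, err
    }
    // 3. repoint the caller's object at the replacement
    *o = *newObj
    return true, nil
}
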
- Do() - } - return o.fs.shouldRetry(err) - }) - return err + if o.parents > 1 { + return errors.New("can't delete safely - has multiple parents") + } + return o.fs.delete(ctx, shortcutID(o.id), o.fs.opt.UseTrash) } // MimeType of an Object if known, "" otherwise @@ -3176,6 +3711,7 @@ var ( _ fs.Copier = (*Fs)(nil) _ fs.Mover = (*Fs)(nil) _ fs.DirMover = (*Fs)(nil) + _ fs.Commander = (*Fs)(nil) _ fs.DirCacheFlusher = (*Fs)(nil) _ fs.ChangeNotifier = (*Fs)(nil) _ fs.PutUncheckeder = (*Fs)(nil) From f81dbb9db757b37cfb77438e338780455eb9180d Mon Sep 17 00:00:00 2001 From: bounty1342 Date: Sat, 12 Sep 2020 00:30:27 +0200 Subject: [PATCH 07/25] Update drive.go --- backend/drive/drive.go | 44 +----------------------------------------- 1 file changed, 1 insertion(+), 43 deletions(-) diff --git a/backend/drive/drive.go b/backend/drive/drive.go index b4de709..925d746 100644 --- a/backend/drive/drive.go +++ b/backend/drive/drive.go @@ -689,7 +689,7 @@ func (f *Fs) changeSvc(){ /** * Create client and svc */ - loadedCreds, _ := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile)) + loadedCreds, _ := ioutil.ReadFile(opt.ServiceAccountFile) opt.ServiceAccountCredentials = string(loadedCreds) oAuthClient, err := getServiceAccountClient(opt, []byte(opt.ServiceAccountCredentials)) if err != nil { @@ -1103,27 +1103,7 @@ func newFs(name, path string, m configmap.Mapper) (*Fs, error) { // Parse config into Options struct opt := new(Options) err := configstruct.Set(m, opt) - //----------------------------------------------------------- - maybeIsFile := false - // Add {id} as the root directory function - if(path != "" && path[0:1] == "{"){ - idIndex := strings.Index(path,"}") - if(idIndex > 0){ - RootId := path[1:idIndex]; - name += RootId - //opt.ServerSideAcrossConfigs = true - if(len(RootId) == 33){ - maybeIsFile = true - opt.RootFolderID = RootId; - }else{ - opt.RootFolderID = RootId; - opt.TeamDriveID = RootId; - } - path = path[idIndex+1:] - } - } - //----------------------------------------------------------- if err != nil { return nil, err } @@ -1232,28 +1212,6 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) { if err != nil { return nil, err } - //------------------------------------------------------ - if(maybeIsFile){ - file,err := f.svc.Files.Get(opt.RootFolderID).Fields("name","id","size","mimeType").SupportsAllDrives(true).Do() - if err == nil{ - //fmt.Println("file.MimeType", file.MimeType) - if( "application/vnd.google-apps.folder" != file.MimeType && file.MimeType != ""){ - tempF := *f - newRoot := "" - tempF.dirCache = dircache.New(newRoot, f.rootFolderID, &tempF) - tempF.root = newRoot - f.dirCache = tempF.dirCache - f.root = tempF.root - - extension, exportName, exportMimeType, isDocument := f.findExportFormat(file) - obj, _ := f.newObjectWithExportInfo(file.Name, file, extension, exportName, exportMimeType, isDocument) - f.root = "isFile:"+file.Name - f.FileObj = &obj - return f, fs.ErrorIsFile - } - } - } - //------------------------------------------------------ // Find the current root err = f.dirCache.FindRoot(ctx, false) From c1169551096aedefa1ac74994c86dcd7ca97fdb9 Mon Sep 17 00:00:00 2001 From: bounty1342 Date: Sat, 12 Sep 2020 01:04:32 +0200 Subject: [PATCH 08/25] update args --- backend/drive/drive.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/backend/drive/drive.go b/backend/drive/drive.go index 925d746..263c7ed 100644 --- a/backend/drive/drive.go +++ b/backend/drive/drive.go @@ -658,14 +658,14 @@ func (f *Fs) changeSvc(){ 
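The hunk opening here is the compile fix for the options added earlier in the series: configstruct.Set binds config keys to Options fields through struct tags, and Go code must then use the exported field names (opt.service_account_start is simply not a valid Go identifier). An excerpt of the relevant fields, reconstructed from the field and option names used in these patches:

package sketch

// Options (excerpt): the config tag is the name used in rclone.conf
// and on the command line; the Go field name is what code reads.
type Options struct {
    ServiceAccountFilePath  string `config:"service_account_file_path"`
    ServiceAccountFileStart int    `config:"service_account_start"`
    ServiceAccountFileEnd   int    `config:"service_account_end"`
}
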
if(len(f.ServiceAccountFiles) <= 0){ return ; } - startSA := opt.service_account_start + startSA := opt.ServiceAccountFileStart // If it is still 0 after reading the folder, exit - if(opt.service_account_start == -1){ + if(opt.ServiceAccountFileStart == -1){ startSA := 0 ; } - endSA := opt.service_account_end + endSA := opt.ServiceAccountFileEnd // If it is still 0 after reading the folder, exit - if(opt.service_account_end == -1){ + if(opt.ServiceAccountFileEnd == -1){ endSA := len(f.ServiceAccountFiles) ; } /** From e3059fb743724650960d7693397f8fd804c19e12 Mon Sep 17 00:00:00 2001 From: bounty1342 Date: Sat, 12 Sep 2020 01:20:08 +0200 Subject: [PATCH 09/25] Update drive.go --- backend/drive/drive.go | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/backend/drive/drive.go b/backend/drive/drive.go index 263c7ed..be9ac5e 100644 --- a/backend/drive/drive.go +++ b/backend/drive/drive.go @@ -671,15 +671,18 @@ func (f *Fs) changeSvc(){ /** * Take the first SA, then if already used, the next one */ - r := startSA + r := 0 for k := range f.ServiceAccountFiles { - if f.ServiceAccountFiles[r] != nil { - opt.ServiceAccountFile = k + if r < startSA { + r++ } - if r > endSA { + else if r > endSA { break } - r++ + else if k != nil { + opt.ServiceAccountFile = k + break + } else fmt.Println("No more SA available !", r) } opt.ServiceAccountFile=f.ServiceAccountFiles[startSA] From 6780d9cfb6fc43c2d83b315d68025ac70dd3d666 Mon Sep 17 00:00:00 2001 From: bounty1342 Date: Sat, 12 Sep 2020 01:26:20 +0200 Subject: [PATCH 10/25] Update drive.go --- backend/drive/drive.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/backend/drive/drive.go b/backend/drive/drive.go index be9ac5e..2324542 100644 --- a/backend/drive/drive.go +++ b/backend/drive/drive.go @@ -682,7 +682,9 @@ func (f *Fs) changeSvc(){ else if k != nil { opt.ServiceAccountFile = k break - } else fmt.Println("No more SA available !", r) + } else { + fmt.Println("No more SA available !", r) + } } opt.ServiceAccountFile=f.ServiceAccountFiles[startSA] From c563710ecc83ad14f76e0a94e1e80b2f458f5a7a Mon Sep 17 00:00:00 2001 From: bounty1342 Date: Sat, 12 Sep 2020 01:33:35 +0200 Subject: [PATCH 11/25] Update drive.go --- backend/drive/drive.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/backend/drive/drive.go b/backend/drive/drive.go index 2324542..a687232 100644 --- a/backend/drive/drive.go +++ b/backend/drive/drive.go @@ -675,11 +675,9 @@ func (f *Fs) changeSvc(){ for k := range f.ServiceAccountFiles { if r < startSA { r++ - } - else if r > endSA { + } else if r > endSA { break - } - else if k != nil { + } else if k != nil { opt.ServiceAccountFile = k break } else { From a1efb47d60d42000586b19c8a3fe200ce443dd10 Mon Sep 17 00:00:00 2001 From: bounty1342 Date: Sat, 12 Sep 2020 01:56:35 +0200 Subject: [PATCH 12/25] Update drive.go --- backend/drive/drive.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/backend/drive/drive.go b/backend/drive/drive.go index a687232..ee2e2c6 100644 --- a/backend/drive/drive.go +++ b/backend/drive/drive.go @@ -677,15 +677,14 @@ func (f *Fs) changeSvc(){ r++ } else if r > endSA { break - } else if k != nil { + } else if f.ServiceAccountFiles[k] != nil { opt.ServiceAccountFile = k break } else { fmt.Println("No more SA available !", r) } } - opt.ServiceAccountFile=f.ServiceAccountFiles[startSA] - + // Remove from inventory delete(f.ServiceAccountFiles, opt.ServiceAccountFile) From 5c90985a78c882da6275047d01b0efe6d1ebb9a2 Mon Sep 
17 00:00:00 2001 From: bounty1342 Date: Sat, 12 Sep 2020 02:25:38 +0200 Subject: [PATCH 13/25] Update drive.go --- backend/drive/drive.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/backend/drive/drive.go b/backend/drive/drive.go index ee2e2c6..a6e78ff 100644 --- a/backend/drive/drive.go +++ b/backend/drive/drive.go @@ -671,14 +671,13 @@ func (f *Fs) changeSvc(){ /** * Take the first SA, then if already used, the next one */ - r := 0 - for k := range f.ServiceAccountFiles { - if r < startSA { + for i, v := range f.ServiceAccountFiles { + if i < startSA { r++ - } else if r > endSA { + } else if i > endSA { break - } else if f.ServiceAccountFiles[k] != nil { - opt.ServiceAccountFile = k + } else if f.ServiceAccountFiles[v] == startSA { + opt.ServiceAccountFile = v break } else { fmt.Println("No more SA available !", r) @@ -687,7 +686,9 @@ func (f *Fs) changeSvc(){ // Remove from inventory delete(f.ServiceAccountFiles, opt.ServiceAccountFile) - + // Remove + opt.ServiceAccountFileEnd-- + /** * Create client and svc */ From 0d2806878df6593d82a3e4c903f0171605b0ba58 Mon Sep 17 00:00:00 2001 From: bounty1342 Date: Sat, 12 Sep 2020 03:08:42 +0200 Subject: [PATCH 14/25] Update drive.go --- backend/drive/drive.go | 39 ++++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/backend/drive/drive.go b/backend/drive/drive.go index a6e78ff..df59336 100644 --- a/backend/drive/drive.go +++ b/backend/drive/drive.go @@ -667,27 +667,28 @@ func (f *Fs) changeSvc(){ // If it is still 0 after reading the folder, exit if(opt.ServiceAccountFileEnd == -1){ endSA := len(f.ServiceAccountFiles) ; + } else if(opt.ServiceAccountFileEnd > len(f.ServiceAccountFiles)){ + endSA := len(f.ServiceAccountFiles) ; } - /** - * Take the first SA, then if already used, the next one - */ - for i, v := range f.ServiceAccountFiles { - if i < startSA { - r++ - } else if i > endSA { - break - } else if f.ServiceAccountFiles[v] == startSA { - opt.ServiceAccountFile = v - break - } else { - fmt.Println("No more SA available !", r) - } + + // sort the SA + var keys []string + for k := range f.ServiceAccountFiles { + keys = append(keys, k) + } + sort.Strings(keys) + if(startSA>endSA){ + fmt.Println("No more SA available !, last :%d", endSA) + return ; + } + // get the range we want + var sa []string = keys[startSA:endSA] + for _, k := range sa { + opt.ServiceAccountFile = f.ServiceAccountFiles[k] + break } - // Remove from inventory - delete(f.ServiceAccountFiles, opt.ServiceAccountFile) - // Remove - opt.ServiceAccountFileEnd-- + opt.ServiceAccountFileStart++ /** * Create client and svc @@ -701,7 +702,7 @@ func (f *Fs) changeSvc(){ f.client = oAuthClient f.svc, err = drive.New(f.client) fmt.Println("gclone sa file:", opt.ServiceAccountFile) - fmt.Println("gclone sa number:", r) + fmt.Println("gclone sa number:", startSA) } // parseParse parses a drive 'url' From cc0a30dba7bf85c1da837631129ca1e299ff705f Mon Sep 17 00:00:00 2001 From: bounty1342 Date: Sat, 12 Sep 2020 03:21:09 +0200 Subject: [PATCH 15/25] Update drive.go --- backend/drive/drive.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/drive/drive.go b/backend/drive/drive.go index df59336..30b577f 100644 --- a/backend/drive/drive.go +++ b/backend/drive/drive.go @@ -684,7 +684,7 @@ func (f *Fs) changeSvc(){ // get the range we want var sa []string = keys[startSA:endSA] for _, k := range sa { - opt.ServiceAccountFile = f.ServiceAccountFiles[k] + opt.ServiceAccountFile = k 
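Patch 14 above is the pivotal correctness change in this back and forth: Go deliberately randomizes map iteration order on every run, so no amount of counting inside "for k := range f.ServiceAccountFiles" can yield a stable "Nth service account". Collecting the keys and sorting them first is what makes the start/end window meaningful. A self-contained sketch of the selection, standard library only:

package sketch

import "sort"

// pickSA returns the key at position start within the sorted key
// set, with end as an exclusive bound (-1 meaning "to the end").
func pickSA(files map[string]int, start, end int) (string, bool) {
    keys := make([]string, 0, len(files))
    for k := range files {
        keys = append(keys, k) // order here is random by design
    }
    sort.Strings(keys) // now the index is deterministic
    if end < 0 || end > len(keys) {
        end = len(keys)
    }
    if start < 0 || start >= end {
        return "", false // window exhausted
    }
    return keys[start], true
}
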
break } From cd8ad9f43b7fb292dbb1914a7c34b47355fa35d3 Mon Sep 17 00:00:00 2001 From: bounty1342 Date: Sat, 12 Sep 2020 03:29:13 +0200 Subject: [PATCH 16/25] Update drive.go --- backend/drive/drive.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/backend/drive/drive.go b/backend/drive/drive.go index 30b577f..7a78517 100644 --- a/backend/drive/drive.go +++ b/backend/drive/drive.go @@ -660,15 +660,15 @@ func (f *Fs) changeSvc(){ } startSA := opt.ServiceAccountFileStart // If it is still 0 after reading the folder, exit - if(opt.ServiceAccountFileStart == -1){ - startSA := 0 ; + if(startSA == -1){ + startSA = 0 ; } endSA := opt.ServiceAccountFileEnd // If it is still 0 after reading the folder, exit - if(opt.ServiceAccountFileEnd == -1){ - endSA := len(f.ServiceAccountFiles) ; - } else if(opt.ServiceAccountFileEnd > len(f.ServiceAccountFiles)){ - endSA := len(f.ServiceAccountFiles) ; + if(endSA == -1){ + endSA = len(f.ServiceAccountFiles) ; + } else if(endSA > len(f.ServiceAccountFiles)){ + endSA = len(f.ServiceAccountFiles) ; } // sort the SA From d6ca59344829d9ec71cf0110bf710d205166cfea Mon Sep 17 00:00:00 2001 From: bounty1342 Date: Sat, 12 Sep 2020 04:08:35 +0200 Subject: [PATCH 17/25] Update drive.go --- backend/drive/drive.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/backend/drive/drive.go b/backend/drive/drive.go index 7a78517..87b7090 100644 --- a/backend/drive/drive.go +++ b/backend/drive/drive.go @@ -670,7 +670,8 @@ func (f *Fs) changeSvc(){ } else if(endSA > len(f.ServiceAccountFiles)){ endSA = len(f.ServiceAccountFiles) ; } - + fs.Debugf(f, "startSA :%d", startSA) + fs.Debugf(f, "endSA :%d", endSA) // sort the SA var keys []string for k := range f.ServiceAccountFiles { @@ -678,13 +679,14 @@ func (f *Fs) changeSvc(){ } sort.Strings(keys) if(startSA>endSA){ - fmt.Println("No more SA available !, last :%d", endSA) + fs.Debugf(f, "No more SA available !, last :%d", endSA) return ; } // get the range we want var sa []string = keys[startSA:endSA] for _, k := range sa { opt.ServiceAccountFile = k + fs.Debugf(f, "Use SA :%d", k) break } @@ -701,8 +703,8 @@ func (f *Fs) changeSvc(){ } f.client = oAuthClient f.svc, err = drive.New(f.client) - fmt.Println("gclone sa file:", opt.ServiceAccountFile) - fmt.Println("gclone sa number:", startSA) + fs.Debugf(f, "gclone sa file: %s", opt.ServiceAccountFile) + fs.Debugf(f, "gclone sa number: %d", startSA) } // parseParse parses a drive 'url' From 826931f05eea702893d1c5f559afc87f43dec1aa Mon Sep 17 00:00:00 2001 From: bounty1342 Date: Sat, 12 Sep 2020 04:30:58 +0200 Subject: [PATCH 18/25] Add files via upload --- fs/config/configflags/configflags.go | 263 +++++++++++++++++++++++++++ 1 file changed, 263 insertions(+) create mode 100644 fs/config/configflags/configflags.go diff --git a/fs/config/configflags/configflags.go b/fs/config/configflags/configflags.go new file mode 100644 index 0000000..68c014d --- /dev/null +++ b/fs/config/configflags/configflags.go @@ -0,0 +1,263 @@ +// Package configflags defines the flags used by rclone. It is +// decoupled into a separate package so it can be replaced. 
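Patch 16 above corrects a classic Go scoping bug from the first draft: inside the if block, "startSA := 0" declares a brand new variable that shadows the outer one and dies at the closing brace (the compiler additionally rejects it as declared and not used), so the outer startSA stayed at -1. Plain assignment with = is the fix, shown in isolation:

package sketch

// normalize shows the shadowing fix from patch 16 in isolation.
func normalize(startSA int) int {
    if startSA == -1 {
        // startSA := 0  // wrong: new block-scoped variable, outer value unchanged
        startSA = 0 // right: assigns to the existing variable
    }
    return startSA
}
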
+package configflags + +// Options set by command line flags +import ( + "log" + "net" + "path/filepath" + "strings" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/config" + "github.com/rclone/rclone/fs/config/flags" + fsLog "github.com/rclone/rclone/fs/log" + "github.com/rclone/rclone/fs/rc" + "github.com/sirupsen/logrus" + "github.com/spf13/pflag" +) + +var ( + // these will get interpreted into fs.Config via SetFlags() below + verbose int + quiet bool + dumpHeaders bool + dumpBodies bool + deleteBefore bool + deleteDuring bool + deleteAfter bool + bindAddr string + disableFeatures string + uploadHeaders []string + downloadHeaders []string + headers []string +) + +// AddFlags adds the non filing system specific flags to the command +func AddFlags(flagSet *pflag.FlagSet) { + rc.AddOption("main", fs.Config) + // NB defaults which aren't the zero for the type should be set in fs/config.go NewConfig + flags.CountVarP(flagSet, &verbose, "verbose", "v", "Print lots more stuff (repeat for more)") + flags.BoolVarP(flagSet, &quiet, "quiet", "q", false, "Print as little stuff as possible") + flags.DurationVarP(flagSet, &fs.Config.ModifyWindow, "modify-window", "", fs.Config.ModifyWindow, "Max time diff to be considered the same") + flags.IntVarP(flagSet, &fs.Config.Checkers, "checkers", "", fs.Config.Checkers, "Number of checkers to run in parallel.") + flags.IntVarP(flagSet, &fs.Config.Transfers, "transfers", "", fs.Config.Transfers, "Number of file transfers to run in parallel.") + flags.StringVarP(flagSet, &config.ConfigPath, "config", "", config.ConfigPath, "Config file.") + flags.StringVarP(flagSet, &config.CacheDir, "cache-dir", "", config.CacheDir, "Directory rclone will use for caching.") + flags.BoolVarP(flagSet, &fs.Config.CheckSum, "checksum", "c", fs.Config.CheckSum, "Skip based on checksum (if available) & size, not mod-time & size") + flags.BoolVarP(flagSet, &fs.Config.SizeOnly, "size-only", "", fs.Config.SizeOnly, "Skip based on size only, not mod-time or checksum") + flags.BoolVarP(flagSet, &fs.Config.IgnoreTimes, "ignore-times", "I", fs.Config.IgnoreTimes, "Don't skip files that match size and time - transfer all files") + flags.BoolVarP(flagSet, &fs.Config.IgnoreExisting, "ignore-existing", "", fs.Config.IgnoreExisting, "Skip all files that exist on destination") + flags.BoolVarP(flagSet, &fs.Config.IgnoreErrors, "ignore-errors", "", fs.Config.IgnoreErrors, "delete even if there are I/O errors") + flags.BoolVarP(flagSet, &fs.Config.DryRun, "dry-run", "n", fs.Config.DryRun, "Do a trial run with no permanent changes") + flags.BoolVarP(flagSet, &fs.Config.Interactive, "interactive", "i", fs.Config.Interactive, "Enable interactive mode") + flags.DurationVarP(flagSet, &fs.Config.ConnectTimeout, "contimeout", "", fs.Config.ConnectTimeout, "Connect timeout") + flags.DurationVarP(flagSet, &fs.Config.Timeout, "timeout", "", fs.Config.Timeout, "IO idle timeout") + flags.DurationVarP(flagSet, &fs.Config.ExpectContinueTimeout, "expect-continue-timeout", "", fs.Config.ExpectContinueTimeout, "Timeout when using expect / 100-continue in HTTP") + flags.BoolVarP(flagSet, &dumpHeaders, "dump-headers", "", false, "Dump HTTP headers - may contain sensitive info") + flags.BoolVarP(flagSet, &dumpBodies, "dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info") + flags.BoolVarP(flagSet, &fs.Config.InsecureSkipVerify, "no-check-certificate", "", fs.Config.InsecureSkipVerify, "Do not verify the server SSL certificate. 
Insecure.") + flags.BoolVarP(flagSet, &fs.Config.AskPassword, "ask-password", "", fs.Config.AskPassword, "Allow prompt for password for encrypted configuration.") + flags.FVarP(flagSet, &fs.Config.PasswordCommand, "password-command", "", "Command for supplying password for encrypted configuration.") + flags.BoolVarP(flagSet, &deleteBefore, "delete-before", "", false, "When synchronizing, delete files on destination before transferring") + flags.BoolVarP(flagSet, &deleteDuring, "delete-during", "", false, "When synchronizing, delete files during transfer") + flags.BoolVarP(flagSet, &deleteAfter, "delete-after", "", false, "When synchronizing, delete files on destination after transferring (default)") + flags.Int64VarP(flagSet, &fs.Config.MaxDelete, "max-delete", "", -1, "When synchronizing, limit the number of deletes") + flags.BoolVarP(flagSet, &fs.Config.TrackRenames, "track-renames", "", fs.Config.TrackRenames, "When synchronizing, track file renames and do a server side move if possible") + flags.StringVarP(flagSet, &fs.Config.TrackRenamesStrategy, "track-renames-strategy", "", fs.Config.TrackRenamesStrategy, "Strategies to use when synchronizing using track-renames hash|modtime|leaf") + flags.IntVarP(flagSet, &fs.Config.LowLevelRetries, "low-level-retries", "", fs.Config.LowLevelRetries, "Number of low level retries to do.") + flags.BoolVarP(flagSet, &fs.Config.UpdateOlder, "update", "u", fs.Config.UpdateOlder, "Skip files that are newer on the destination.") + flags.BoolVarP(flagSet, &fs.Config.UseServerModTime, "use-server-modtime", "", fs.Config.UseServerModTime, "Use server modified time instead of object metadata") + flags.BoolVarP(flagSet, &fs.Config.NoGzip, "no-gzip-encoding", "", fs.Config.NoGzip, "Don't set Accept-Encoding: gzip.") + flags.IntVarP(flagSet, &fs.Config.MaxDepth, "max-depth", "", fs.Config.MaxDepth, "If set limits the recursion depth to this.") + flags.BoolVarP(flagSet, &fs.Config.IgnoreSize, "ignore-size", "", false, "Ignore size when skipping use mod-time or checksum.") + flags.BoolVarP(flagSet, &fs.Config.IgnoreChecksum, "ignore-checksum", "", fs.Config.IgnoreChecksum, "Skip post copy check of checksums.") + flags.BoolVarP(flagSet, &fs.Config.IgnoreCaseSync, "ignore-case-sync", "", fs.Config.IgnoreCaseSync, "Ignore case when synchronizing") + flags.BoolVarP(flagSet, &fs.Config.NoTraverse, "no-traverse", "", fs.Config.NoTraverse, "Don't traverse destination file system on copy.") + flags.BoolVarP(flagSet, &fs.Config.CheckFirst, "check-first", "", fs.Config.CheckFirst, "Do all the checks before starting transfers.") + flags.BoolVarP(flagSet, &fs.Config.NoCheckDest, "no-check-dest", "", fs.Config.NoCheckDest, "Don't check the destination, copy regardless.") + flags.BoolVarP(flagSet, &fs.Config.NoUnicodeNormalization, "no-unicode-normalization", "", fs.Config.NoUnicodeNormalization, "Don't normalize unicode characters in filenames.") + flags.BoolVarP(flagSet, &fs.Config.NoUpdateModTime, "no-update-modtime", "", fs.Config.NoUpdateModTime, "Don't update destination mod-time if files identical.") + flags.StringVarP(flagSet, &fs.Config.CompareDest, "compare-dest", "", fs.Config.CompareDest, "Include additional server-side path during comparison.") + flags.StringVarP(flagSet, &fs.Config.CopyDest, "copy-dest", "", fs.Config.CopyDest, "Implies --compare-dest but also copies files from path into destination.") + flags.StringVarP(flagSet, &fs.Config.BackupDir, "backup-dir", "", fs.Config.BackupDir, "Make backups into hierarchy based in DIR.") + flags.StringVarP(flagSet, 
&fs.Config.Suffix, "suffix", "", fs.Config.Suffix, "Suffix to add to changed files.") + flags.BoolVarP(flagSet, &fs.Config.SuffixKeepExtension, "suffix-keep-extension", "", fs.Config.SuffixKeepExtension, "Preserve the extension when using --suffix.") + flags.BoolVarP(flagSet, &fs.Config.UseListR, "fast-list", "", fs.Config.UseListR, "Use recursive list if available. Uses more memory but fewer transactions.") + flags.Float64VarP(flagSet, &fs.Config.TPSLimit, "tpslimit", "", fs.Config.TPSLimit, "Limit HTTP transactions per second to this.") + flags.IntVarP(flagSet, &fs.Config.TPSLimitBurst, "tpslimit-burst", "", fs.Config.TPSLimitBurst, "Max burst of transactions for --tpslimit.") + flags.StringVarP(flagSet, &bindAddr, "bind", "", "", "Local address to bind to for outgoing connections, IPv4, IPv6 or name.") + flags.StringVarP(flagSet, &disableFeatures, "disable", "", "", "Disable a comma separated list of features. Use help to see a list.") + flags.StringVarP(flagSet, &fs.Config.UserAgent, "user-agent", "", fs.Config.UserAgent, "Set the user-agent to a specified string. The default is rclone/ version") + flags.BoolVarP(flagSet, &fs.Config.Immutable, "immutable", "", fs.Config.Immutable, "Do not modify files. Fail if existing files have been modified.") + flags.BoolVarP(flagSet, &fs.Config.AutoConfirm, "auto-confirm", "", fs.Config.AutoConfirm, "If enabled, do not request console confirmation.") + flags.IntVarP(flagSet, &fs.Config.StatsFileNameLength, "stats-file-name-length", "", fs.Config.StatsFileNameLength, "Max file name length in stats. 0 for no limit") + flags.FVarP(flagSet, &fs.Config.LogLevel, "log-level", "", "Log level DEBUG|INFO|NOTICE|ERROR") + flags.FVarP(flagSet, &fs.Config.StatsLogLevel, "stats-log-level", "", "Log level to show --stats output DEBUG|INFO|NOTICE|ERROR") + flags.FVarP(flagSet, &fs.Config.BwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.") + flags.FVarP(flagSet, &fs.Config.BwLimitFile, "bwlimit-file", "", "Bandwidth limit per file in kBytes/s, or use suffix b|k|M|G or a full timetable.") + flags.FVarP(flagSet, &fs.Config.BufferSize, "buffer-size", "", "In memory buffer size when reading files for each --transfer.") + flags.FVarP(flagSet, &fs.Config.StreamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends.") + flags.FVarP(flagSet, &fs.Config.Dump, "dump", "", "List of items to dump from: "+fs.DumpFlagsList) + flags.FVarP(flagSet, &fs.Config.MaxTransfer, "max-transfer", "", "Maximum size of data to transfer.") + flags.DurationVarP(flagSet, &fs.Config.MaxDuration, "max-duration", "", 0, "Maximum duration rclone will transfer data for.") + flags.FVarP(flagSet, &fs.Config.CutoffMode, "cutoff-mode", "", "Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS") + flags.IntVarP(flagSet, &fs.Config.MaxBacklog, "max-backlog", "", fs.Config.MaxBacklog, "Maximum number of objects in sync or check backlog.") + flags.IntVarP(flagSet, &fs.Config.MaxStatsGroups, "max-stats-groups", "", fs.Config.MaxStatsGroups, "Maximum number of stats groups to keep in memory. 
On max oldest is discarded.") + flags.BoolVarP(flagSet, &fs.Config.StatsOneLine, "stats-one-line", "", fs.Config.StatsOneLine, "Make the stats fit on one line.") + flags.BoolVarP(flagSet, &fs.Config.StatsOneLineDate, "stats-one-line-date", "", fs.Config.StatsOneLineDate, "Enables --stats-one-line and add current date/time prefix.") + flags.StringVarP(flagSet, &fs.Config.StatsOneLineDateFormat, "stats-one-line-date-format", "", fs.Config.StatsOneLineDateFormat, "Enables --stats-one-line-date and uses custom formatted date. Enclose date string in double quotes (\"). See https://golang.org/pkg/time/#Time.Format") + flags.BoolVarP(flagSet, &fs.Config.ErrorOnNoTransfer, "error-on-no-transfer", "", fs.Config.ErrorOnNoTransfer, "Sets exit code 9 if no files are transferred, useful in scripts") + flags.BoolVarP(flagSet, &fs.Config.Progress, "progress", "P", fs.Config.Progress, "Show progress during transfer.") + flags.BoolVarP(flagSet, &fs.Config.Cookie, "use-cookies", "", fs.Config.Cookie, "Enable session cookiejar.") + flags.BoolVarP(flagSet, &fs.Config.UseMmap, "use-mmap", "", fs.Config.UseMmap, "Use mmap allocator (see docs).") + flags.StringVarP(flagSet, &fs.Config.CaCert, "ca-cert", "", fs.Config.CaCert, "CA certificate used to verify servers") + flags.StringVarP(flagSet, &fs.Config.ClientCert, "client-cert", "", fs.Config.ClientCert, "Client SSL certificate (PEM) for mutual TLS auth") + flags.StringVarP(flagSet, &fs.Config.ClientKey, "client-key", "", fs.Config.ClientKey, "Client SSL private key (PEM) for mutual TLS auth") + flags.FVarP(flagSet, &fs.Config.MultiThreadCutoff, "multi-thread-cutoff", "", "Use multi-thread downloads for files above this size.") + flags.IntVarP(flagSet, &fs.Config.MultiThreadStreams, "multi-thread-streams", "", fs.Config.MultiThreadStreams, "Max number of streams to use for multi-thread downloads.") + flags.BoolVarP(flagSet, &fs.Config.UseJSONLog, "use-json-log", "", fs.Config.UseJSONLog, "Use json log format.") + flags.StringVarP(flagSet, &fs.Config.OrderBy, "order-by", "", fs.Config.OrderBy, "Instructions on how to order the transfers, eg 'size,descending'") + flags.StringArrayVarP(flagSet, &uploadHeaders, "header-upload", "", nil, "Set HTTP header for upload transactions") + flags.StringArrayVarP(flagSet, &downloadHeaders, "header-download", "", nil, "Set HTTP header for download transactions") + flags.StringArrayVarP(flagSet, &headers, "header", "", nil, "Set HTTP header for all transactions") + flags.BoolVarP(flagSet, &fs.Config.RefreshTimes, "refresh-times", "", fs.Config.RefreshTimes, "Refresh the modtime of remote files.") +} + +// ParseHeaders converts the strings passed in via the header flags into HTTPOptions +func ParseHeaders(headers []string) []*fs.HTTPOption { + opts := []*fs.HTTPOption{} + for _, header := range headers { + parts := strings.SplitN(header, ":", 2) + if len(parts) == 1 { + log.Fatalf("Failed to parse '%s' as an HTTP header. 
Expecting a string like: 'Content-Encoding: gzip'", header) + } + option := &fs.HTTPOption{ + Key: strings.TrimSpace(parts[0]), + Value: strings.TrimSpace(parts[1]), + } + opts = append(opts, option) + } + return opts +} + +// SetFlags converts any flags into config which weren't straight forward +func SetFlags() { + if verbose >= 2 { + fs.Config.LogLevel = fs.LogLevelDebug + } else if verbose >= 1 { + fs.Config.LogLevel = fs.LogLevelInfo + } + if quiet { + if verbose > 0 { + log.Fatalf("Can't set -v and -q") + } + fs.Config.LogLevel = fs.LogLevelError + } + logLevelFlag := pflag.Lookup("log-level") + if logLevelFlag != nil && logLevelFlag.Changed { + if verbose > 0 { + log.Fatalf("Can't set -v and --log-level") + } + if quiet { + log.Fatalf("Can't set -q and --log-level") + } + } + if fs.Config.UseJSONLog { + logrus.AddHook(fsLog.NewCallerHook()) + logrus.SetFormatter(&logrus.JSONFormatter{ + TimestampFormat: "2006-01-02T15:04:05.999999-07:00", + }) + logrus.SetLevel(logrus.DebugLevel) + switch fs.Config.LogLevel { + case fs.LogLevelEmergency, fs.LogLevelAlert: + logrus.SetLevel(logrus.PanicLevel) + case fs.LogLevelCritical: + logrus.SetLevel(logrus.FatalLevel) + case fs.LogLevelError: + logrus.SetLevel(logrus.ErrorLevel) + case fs.LogLevelWarning, fs.LogLevelNotice: + logrus.SetLevel(logrus.WarnLevel) + case fs.LogLevelInfo: + logrus.SetLevel(logrus.InfoLevel) + case fs.LogLevelDebug: + logrus.SetLevel(logrus.DebugLevel) + } + } + + if dumpHeaders { + fs.Config.Dump |= fs.DumpHeaders + fs.Logf(nil, "--dump-headers is obsolete - please use --dump headers instead") + } + if dumpBodies { + fs.Config.Dump |= fs.DumpBodies + fs.Logf(nil, "--dump-bodies is obsolete - please use --dump bodies instead") + } + + switch { + case deleteBefore && (deleteDuring || deleteAfter), + deleteDuring && deleteAfter: + log.Fatalf(`Only one of --delete-before, --delete-during or --delete-after can be used.`) + case deleteBefore: + fs.Config.DeleteMode = fs.DeleteModeBefore + case deleteDuring: + fs.Config.DeleteMode = fs.DeleteModeDuring + case deleteAfter: + fs.Config.DeleteMode = fs.DeleteModeAfter + default: + fs.Config.DeleteMode = fs.DeleteModeDefault + } + + if fs.Config.CompareDest != "" && fs.Config.CopyDest != "" { + log.Fatalf(`Can't use --compare-dest with --copy-dest.`) + } + + switch { + case len(fs.Config.StatsOneLineDateFormat) > 0: + fs.Config.StatsOneLineDate = true + fs.Config.StatsOneLine = true + case fs.Config.StatsOneLineDate: + fs.Config.StatsOneLineDateFormat = "2006/01/02 15:04:05 - " + fs.Config.StatsOneLine = true + } + + if bindAddr != "" { + addrs, err := net.LookupIP(bindAddr) + if err != nil { + log.Fatalf("--bind: Failed to parse %q as IP address: %v", bindAddr, err) + } + if len(addrs) != 1 { + log.Fatalf("--bind: Expecting 1 IP address for %q but got %d", bindAddr, len(addrs)) + } + fs.Config.BindAddr = addrs[0] + } + + if disableFeatures != "" { + if disableFeatures == "help" { + log.Fatalf("Possible backend features are: %s\n", strings.Join(new(fs.Features).List(), ", ")) + } + fs.Config.DisableFeatures = strings.Split(disableFeatures, ",") + } + + if len(uploadHeaders) != 0 { + fs.Config.UploadHeaders = ParseHeaders(uploadHeaders) + } + if len(downloadHeaders) != 0 { + fs.Config.DownloadHeaders = ParseHeaders(downloadHeaders) + } + if len(headers) != 0 { + fs.Config.Headers = ParseHeaders(headers) + } + + // Make the config file absolute + configPath, err := filepath.Abs(config.ConfigPath) + if err == nil { + config.ConfigPath = configPath + } + + // Set whether 
multi-thread-streams was set + multiThreadStreamsFlag := pflag.Lookup("multi-thread-streams") + fs.Config.MultiThreadSet = multiThreadStreamsFlag != nil && multiThreadStreamsFlag.Changed + +} \ No newline at end of file From 4c26ce860134bb24a65575ca3ced85db2779ccbf Mon Sep 17 00:00:00 2001 From: bounty1342 Date: Sat, 12 Sep 2020 04:34:37 +0200 Subject: [PATCH 19/25] Update configflags.go --- fs/config/configflags/configflags.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/config/configflags/configflags.go b/fs/config/configflags/configflags.go index 68c014d..3ed5fd8 100644 --- a/fs/config/configflags/configflags.go +++ b/fs/config/configflags/configflags.go @@ -123,6 +123,8 @@ func AddFlags(flagSet *pflag.FlagSet) { flags.StringArrayVarP(flagSet, &downloadHeaders, "header-download", "", nil, "Set HTTP header for download transactions") flags.StringArrayVarP(flagSet, &headers, "header", "", nil, "Set HTTP header for all transactions") flags.BoolVarP(flagSet, &fs.Config.RefreshTimes, "refresh-times", "", fs.Config.RefreshTimes, "Refresh the modtime of remote files.") + flags.IntVarP(flagSet, &fs.Config.ServiceAccountFileStart, "service_account_start", "", fs.Config.ServiceAccountFileStart, "First SA to consider.") + flags.IntVarP(flagSet, &fs.Config.ServiceAccountFileEnd, "service_account_end", "", fs.Config.ServiceAccountFileEnd, "Last SA to consider.") } // ParseHeaders converts the strings passed in via the header flags into HTTPOptions @@ -260,4 +262,4 @@ func SetFlags() { multiThreadStreamsFlag := pflag.Lookup("multi-thread-streams") fs.Config.MultiThreadSet = multiThreadStreamsFlag != nil && multiThreadStreamsFlag.Changed -} \ No newline at end of file +} From 516d737327c9314841be1e1c2ab6fcc8779c06ca Mon Sep 17 00:00:00 2001 From: bounty1342 Date: Sat, 12 Sep 2020 04:48:24 +0200 Subject: [PATCH 20/25] Update drive.go --- backend/drive/drive.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/backend/drive/drive.go b/backend/drive/drive.go index 87b7090..a1b3a06 100644 --- a/backend/drive/drive.go +++ b/backend/drive/drive.go @@ -615,7 +615,11 @@ func (f *Fs) shouldRetry(err error) (bool, error) { // If ServiceAccountFilePath exists, call changeSvc and try again if(f.opt.ServiceAccountFilePath != ""){ f.waitChangeSvc.Lock() - f.changeSvc() + _, err := f.changeSvc() + if e != nil { + fs.Errorf(f, "Stop no more SA", err) + return false, fserrors.FatalError(err) + } f.waitChangeSvc.Unlock() return true, err } @@ -679,8 +683,8 @@ func (f *Fs) changeSvc(){ } sort.Strings(keys) if(startSA>endSA){ - fs.Debugf(f, "No more SA available !, last :%d", endSA) - return ; + fs.Errorf(f, "No more SA available !", err) + return false, errors.Errorf("No more SA available !", endSA) } // get the range we want var sa []string = keys[startSA:endSA] From 2e7f9f19aca14cbc581589e590579210ee4c34ef Mon Sep 17 00:00:00 2001 From: bounty1342 Date: Sat, 12 Sep 2020 04:52:06 +0200 Subject: [PATCH 21/25] Update drive.go --- backend/drive/drive.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/drive/drive.go b/backend/drive/drive.go index a1b3a06..2bff601 100644 --- a/backend/drive/drive.go +++ b/backend/drive/drive.go @@ -687,7 +687,7 @@ func (f *Fs) changeSvc(){ return false, errors.Errorf("No more SA available !", endSA) } // get the range we want - var sa []string = keys[startSA:endSA] + var sa []string = keys[startSA:startSA] for _, k := range sa { opt.ServiceAccountFile = k fs.Debugf(f, "Use SA :%d", k) From 
From 820849a6a690996ceb111188d42dd1965b05b011 Mon Sep 17 00:00:00 2001
From: bounty1342
Date: Sat, 12 Sep 2020 05:06:42 +0200
Subject: [PATCH 22/25] Update drive.go

---
 backend/drive/drive.go | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/backend/drive/drive.go b/backend/drive/drive.go
index 2bff601..3e491e9 100644
--- a/backend/drive/drive.go
+++ b/backend/drive/drive.go
@@ -616,7 +616,7 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
 			if(f.opt.ServiceAccountFilePath != ""){
 				f.waitChangeSvc.Lock()
 				_, err := f.changeSvc()
-				if e != nil {
+				if err != nil {
 					fs.Errorf(f, "Stop no more SA", err)
 					return false, fserrors.FatalError(err)
 				}
@@ -638,7 +638,7 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
 }
 
 // Replace f.svc function
-func (f *Fs) changeSvc(){
+func (f *Fs) changeSvc()(bool, error) {
 	opt := &f.opt;
@@ -658,10 +658,6 @@ func (f *Fs) changeSvc()(bool, error) {
 		}
 	}
 
-	// If it is still 0 after reading the folder, exit
-	if(len(f.ServiceAccountFiles) <= 0){
-		return ;
-	}
 	startSA := opt.ServiceAccountFileStart
 	// If it is still 0 after reading the folder, exit
 	if(startSA == -1){
@@ -683,7 +679,7 @@ func (f *Fs) changeSvc()(bool, error) {
 	}
 	sort.Strings(keys)
 	if(startSA>endSA){
-		fs.Errorf(f, "No more SA available !", err)
+		fs.Errorf(f, "No more SA available !", endSA)
 		return false, errors.Errorf("No more SA available !", endSA)
 	}
 	fs.Debugf(f, "gclone sa number: %d", startSA)
+	return true, err
 }

From a563210b4aff1e040d79100ba190f87f55c3c818 Mon Sep 17 00:00:00 2001
From: bounty1342
Date: Sat, 12 Sep 2020 05:33:31 +0200
Subject: [PATCH 23/25] Update drive.go

---
 backend/drive/drive.go | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/backend/drive/drive.go b/backend/drive/drive.go
index 3e491e9..aa923d3 100644
--- a/backend/drive/drive.go
+++ b/backend/drive/drive.go
@@ -658,7 +658,7 @@ func (f *Fs) changeSvc()(bool, error) {
 		}
 	}
 
-	startSA := opt.ServiceAccountFileStart
+	startSA := opt.ServiceAccountFileStart-1
 	// If it is still 0 after reading the folder, exit
 	if(startSA == -1){
 		startSA = 0 ;
 	sort.Strings(keys)
+	fs.Debugf(f, "All SA array: %v", k)
 	if(startSA>endSA){
 		fs.Errorf(f, "No more SA available !", endSA)
 		return false, errors.Errorf("No more SA available !", endSA)
 	}
 	// get the range we want
-	var sa []string = keys[startSA:startSA]
-	for _, k := range sa {
-		opt.ServiceAccountFile = k
-		fs.Debugf(f, "Use SA :%d", k)
-		break
-	}
+	opt.ServiceAccountFile = keys[startSA]
+	fs.Debugf(f, "Use SA :%d", opt.ServiceAccountFile)
 
 	opt.ServiceAccountFileStart++
 
 	oAuthClient, err := getServiceAccountClient(opt, []byte(opt.ServiceAccountCredentials))
 	if err != nil {
 		errors.Wrap(err, "failed to create oauth client from service account")
+		return true, errors.Errorf("failed to create oauth client from service account", err)
 	}
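Note: patches 22 and 23 give changeSvc a (bool, error) signature so that shouldRetry can distinguish a successful rotation (retry the request) from an exhausted pool (give up), wrapping the latter in fserrors.FatalError to stop the retry loop. One loose end in the hunks above: on the fatal path the early return skips f.waitChangeSvc.Unlock(), so the mutex stays held. A condensed sketch of the intended contract, with a defer so the lock is released on every path (rotate stands in for f.changeSvc and mu for f.waitChangeSvc; this is a simplified model, not the gclone code):

    package main

    import (
    	"errors"
    	"fmt"
    	"sync"

    	"github.com/rclone/rclone/fs/fserrors"
    )

    // shouldRetryOnRateLimit models the decision from patches 22-23: rotate
    // credentials after a rate-limit error; if rotation reports that the pool
    // is exhausted, mark the error fatal so the caller stops retrying.
    func shouldRetryOnRateLimit(mu *sync.Mutex, apiErr error, rotate func() error) (bool, error) {
    	mu.Lock()
    	defer mu.Unlock() // released on every path, unlike the early return in the diff

    	if err := rotate(); err != nil {
    		return false, fserrors.FatalError(err) // no SA left: stop retrying for good
    	}
    	return true, apiErr // fresh credentials loaded: retry the request
    }

    func main() {
    	var mu sync.Mutex
    	retry, err := shouldRetryOnRateLimit(&mu, errors.New("userRateLimitExceeded"),
    		func() error { return errors.New("no more SA available") })
    	fmt.Println(retry, err) // false and a fatal-wrapped error
    }

Deferring the unlock is the idiomatic fix; alternatively the fatal branch could call Unlock() explicitly before returning.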
From 0079f57ea5e1031c24c02657b2f2ffb935cd8dc5 Mon Sep 17 00:00:00 2001
From: bounty1342
Date: Sat, 12 Sep 2020 05:48:58 +0200
Subject: [PATCH 24/25] Update drive.go

---
 backend/drive/drive.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/backend/drive/drive.go b/backend/drive/drive.go
index aa923d3..33d0dec 100644
--- a/backend/drive/drive.go
+++ b/backend/drive/drive.go
@@ -678,7 +678,7 @@ func (f *Fs) changeSvc()(bool, error) {
 		keys = append(keys, k)
 	}
 	sort.Strings(keys)
-	fs.Debugf(f, "All SA array: %v", k)
+	fs.Debugf(f, "All SA array: %v", keys)
 	if(startSA>endSA){
 		fs.Errorf(f, "No more SA available !", endSA)
 		return false, errors.Errorf("No more SA available !", endSA)

From 2c23b9dcc362ff37dcafc15749099652b34d7152 Mon Sep 17 00:00:00 2001
From: bounty1342
Date: Sat, 12 Sep 2020 06:07:25 +0200
Subject: [PATCH 25/25] Update drive.go

---
 backend/drive/drive.go | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/backend/drive/drive.go b/backend/drive/drive.go
index 33d0dec..b2561f5 100644
--- a/backend/drive/drive.go
+++ b/backend/drive/drive.go
@@ -678,15 +678,12 @@ func (f *Fs) changeSvc()(bool, error) {
 		keys = append(keys, k)
 	}
 	sort.Strings(keys)
-	fs.Debugf(f, "All SA array: %v", keys)
 	if(startSA>endSA){
 		fs.Errorf(f, "No more SA available !", endSA)
 		return false, errors.Errorf("No more SA available !", endSA)
 	}
-	// get the range we want
+	// get first SA
 	opt.ServiceAccountFile = keys[startSA]
-	fs.Debugf(f, "Use SA :%d", opt.ServiceAccountFile)
-
 	opt.ServiceAccountFileStart++
 
 	/**
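Note: taken together, patches 24 and 25 leave the rotation in its final shape: enumerate and sort the SA files, take keys[startSA], advance ServiceAccountFileStart so the next rate limit moves on to the following file, then rebuild the oauth client and Drive service from the chosen credentials. A condensed standalone sketch of that selection flow (simplified and normalised to 0-based indices; the function name, parameter names, map value type, and error text are assumptions, not the verbatim gclone implementation):

    package main

    import (
    	"errors"
    	"fmt"
    	"sort"
    )

    // pickNextSA models the post-patch-25 selection: sort the known SA file
    // names, take the one at the cursor, and advance the cursor so the next
    // rotation uses the following file. cursor and end follow the -1
    // defaults of service_account_start/end.
    func pickNextSA(files map[string]int, cursor *int, end int) (string, error) {
    	keys := make([]string, 0, len(files))
    	for k := range files {
    		keys = append(keys, k)
    	}
    	sort.Strings(keys)

    	if end < 0 || end > len(keys) {
    		end = len(keys) // -1 default: no upper bound on the pool
    	}
    	if *cursor < 0 {
    		*cursor = 0 // -1 default: begin with the first SA
    	}
    	if *cursor >= end {
    		return "", errors.New("no more SA available")
    	}
    	sa := keys[*cursor]
    	*cursor++ // the next rate limit rotates to the following file
    	return sa, nil
    }

    func main() {
    	files := map[string]int{"sa-002.json": 1, "sa-001.json": 1, "sa-003.json": 1}
    	cursor, end := -1, -1
    	for i := 0; i < 4; i++ {
    		fmt.Println(pickNextSA(files, &cursor, end))
    	}
    }

Run against three files with both flags left at their -1 defaults, this yields sa-001.json, sa-002.json, sa-003.json and then the terminal error, which is the condition the fatal-error handling in shouldRetry relies on to stop retrying.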