From 59377b034e4c12e82fd41dc6607064b85439e3c4 Mon Sep 17 00:00:00 2001 From: Farukhzhon Date: Thu, 23 Jul 2020 12:14:09 +0300 Subject: [PATCH 1/7] command/{cp, mv, rm}: added dry-run option --- CHANGELOG.md | 3 ++ command/cp.go | 120 ++++++++++++++++++++++++++++--------------------- command/mv.go | 1 + command/rm.go | 56 ++++++++++++++++------- e2e/mv_test.go | 49 ++++++++++++++++++++ e2e/rm_test.go | 42 +++++++++++++++++ 6 files changed, 204 insertions(+), 67 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 470b1cebc..95e0bcdef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,8 @@ # Changelog +#### Features +- Added `--dry-run` option for copy, move, and remove operations. It displays which commands will be executed without actually having a side effect. ([#90](https://github.com/peak/s5cmd/issues/90)) + ## v1.1.0 - 22 Jul 2020 With this release, Windows is supported. diff --git a/command/cp.go b/command/cp.go index 632c9fce6..f9c9af2d8 100644 --- a/command/cp.go +++ b/command/cp.go @@ -70,6 +70,9 @@ Examples: 12. Perform KMS-SSE of the object(s) at the destination using customer managed Customer Master Key (CMK) key id > s5cmd {{.HelpName}} -sse aws:kms -sse-kms-key-id s3://bucket/object s3://target-bucket/prefix/object + + 13. Check what s5cmd will do, without actually doing so (check mode option) + > s5cmd {{.HelpName}} -dry-run dir/ s3://bucket/ ` var copyCommandFlags = []cli.Flag{ @@ -125,6 +128,10 @@ var copyCommandFlags = []cli.Flag{ Name: "acl", Usage: "set acl for target: defines granted accesses and their types on different accounts/groups", }, + &cli.BoolFlag{ + Name: "dry-run", + Usage: "show what commands will be executed without actually executing them", + }, } var copyCommand = &cli.Command{ @@ -149,6 +156,7 @@ var copyCommand = &cli.Command{ ifSourceNewer: c.Bool("if-source-newer"), flatten: c.Bool("flatten"), followSymlinks: !c.Bool("no-follow-symlinks"), + dryRun: c.Bool("dry-run"), storageClass: storage.StorageClass(c.String("storage-class")), concurrency: c.Int("concurrency"), partSize: c.Int64("part-size") * megabytes, @@ -174,6 +182,7 @@ type Copy struct { ifSourceNewer bool flatten bool followSymlinks bool + dryRun bool storageClass storage.StorageClass encryptionMethod string encryptionKeyID string @@ -355,20 +364,23 @@ func (c Copy) doDownload(ctx context.Context, srcurl *url.URL, dsturl *url.URL) return err } - f, err := os.Create(dsturl.Absolute()) - if err != nil { - return err - } - defer f.Close() + var size int64 + if !c.dryRun { + f, err := os.Create(dsturl.Absolute()) + if err != nil { + return err + } + defer f.Close() - size, err := srcClient.Get(ctx, srcurl, f, c.concurrency, c.partSize) - if err != nil { - _ = dstClient.Delete(ctx, dsturl) - return err - } + size, err = srcClient.Get(ctx, srcurl, f, c.concurrency, c.partSize) + if err != nil { + _ = dstClient.Delete(ctx, dsturl) + return err + } - if c.deleteSource { - _ = srcClient.Delete(ctx, srcurl) + if c.deleteSource { + _ = srcClient.Delete(ctx, srcurl) + } } msg := log.InfoMessage{ @@ -385,46 +397,50 @@ func (c Copy) doDownload(ctx context.Context, srcurl *url.URL, dsturl *url.URL) } func (c Copy) doUpload(ctx context.Context, srcurl *url.URL, dsturl *url.URL) error { - // TODO(ig): use storage abstraction - f, err := os.Open(srcurl.Absolute()) - if err != nil { - return err - } - defer f.Close() - err = c.shouldOverride(ctx, srcurl, dsturl) - if err != nil { - if errorpkg.IsWarning(err) { - printDebug(c.op, srcurl, dsturl, err) - return nil + var size int64 + if !c.dryRun { + 
// TODO(ig): use storage abstraction + f, err := os.Open(srcurl.Absolute()) + if err != nil { + return err } - return err - } + defer f.Close() - dstClient := storage.NewClient(dsturl) + err = c.shouldOverride(ctx, srcurl, dsturl) + if err != nil { + if errorpkg.IsWarning(err) { + printDebug(c.op, srcurl, dsturl, err) + return nil + } + return err + } - metadata := storage.NewMetadata(). - SetContentType(guessContentType(f)). - SetStorageClass(string(c.storageClass)). - SetSSE(c.encryptionMethod). - SetSSEKeyID(c.encryptionKeyID). - SetACL(c.acl) + dstClient := storage.NewClient(dsturl) - err = dstClient.Put(ctx, f, dsturl, metadata, c.concurrency, c.partSize) - if err != nil { - return err - } + metadata := storage.NewMetadata(). + SetContentType(guessContentType(f)). + SetStorageClass(string(c.storageClass)). + SetSSE(c.encryptionMethod). + SetSSEKeyID(c.encryptionKeyID). + SetACL(c.acl) - srcClient := storage.NewClient(srcurl) + err = dstClient.Put(ctx, f, dsturl, metadata, c.concurrency, c.partSize) + if err != nil { + return err + } - obj, _ := srcClient.Stat(ctx, srcurl) - size := obj.Size + srcClient := storage.NewClient(srcurl) - if c.deleteSource { - // close the file before deleting - f.Close() - if err := srcClient.Delete(ctx, srcurl); err != nil { - return err + obj, _ := srcClient.Stat(ctx, srcurl) + size = obj.Size + + if c.deleteSource { + // close the file before deleting + f.Close() + if err := srcClient.Delete(ctx, srcurl); err != nil { + return err + } } } @@ -460,15 +476,17 @@ func (c Copy) doCopy(ctx context.Context, srcurl *url.URL, dsturl *url.URL) erro return err } - err = srcClient.Copy(ctx, srcurl, dsturl, metadata) - if err != nil { - return err - } - - if c.deleteSource { - if err := srcClient.Delete(ctx, srcurl); err != nil { + if !c.dryRun { + err = srcClient.Copy(ctx, srcurl, dsturl, metadata) + if err != nil { return err } + + if c.deleteSource { + if err := srcClient.Delete(ctx, srcurl); err != nil { + return err + } + } } msg := log.InfoMessage{ diff --git a/command/mv.go b/command/mv.go index 550280965..459f7ab81 100644 --- a/command/mv.go +++ b/command/mv.go @@ -52,6 +52,7 @@ var moveCommand = &cli.Command{ ifSizeDiffer: c.Bool("if-size-differ"), ifSourceNewer: c.Bool("if-source-newer"), flatten: c.Bool("flatten"), + dryRun: c.Bool("dry-run"), storageClass: storage.StorageClass(c.String("storage-class")), encryptionMethod: c.String("sse"), encryptionKeyID: c.String("sse-kms-key-id"), diff --git a/command/rm.go b/command/rm.go index 2b195ddf0..70517f1c9 100644 --- a/command/rm.go +++ b/command/rm.go @@ -34,6 +34,9 @@ Examples: 4. Delete all matching objects and a specific object > s5cmd {{.HelpName}} s3://bucketname/prefix/* s3://bucketname/object1.gz + + 5. Check what s5cmd will do, without actually doing so (check mode option) + > s5cmd {{.HelpName}} -dry-run s3://bucket/prefix/* ` var deleteCommand = &cli.Command{ @@ -41,6 +44,12 @@ var deleteCommand = &cli.Command{ HelpName: "rm", Usage: "remove objects", CustomHelpTemplate: deleteHelpTemplate, + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "dry-run", + Usage: "show what commands will be executed without actually executing them", + }, + }, Before: func(c *cli.Context) error { if !c.Args().Present() { return fmt.Errorf("expected at least 1 object to remove") @@ -49,23 +58,28 @@ var deleteCommand = &cli.Command{ return sourcesHaveSameType(c.Args().Slice()...) 
}, Action: func(c *cli.Context) error { - return Delete( - c.Context, - c.Command.Name, - givenCommand(c), - c.Args().Slice()..., - ) + return Delete{ + src: c.Args().Slice(), + op: c.Command.Name, + fullCommand: givenCommand(c), + dryRun: c.Bool("dry-run"), + }.Run(c.Context) }, } -// Delete remove given sources. -func Delete( - ctx context.Context, - op string, - fullCommand string, - src ...string, -) error { - srcurls, err := newURLs(src...) +// Delete holds remove operation flags and states. +type Delete struct { + src []string + op string + fullCommand string + + // flags + dryRun bool +} + +// Run removes given sources. +func (d Delete) Run(ctx context.Context) error { + srcurls, err := newURLs(d.src...) if err != nil { return err } @@ -86,12 +100,22 @@ func Delete( } if err := object.Err; err != nil { - printError(fullCommand, op, err) + printError(d.fullCommand, d.op, err) continue } urlch <- object.URL } }() + if d.dryRun { + for u := range urlch { + msg := log.InfoMessage{ + Operation: d.op, + Source: u, + } + log.Info(msg) + } + return nil + } resultch := client.MultiDelete(ctx, urlch) @@ -107,7 +131,7 @@ func Delete( } msg := log.InfoMessage{ - Operation: op, + Operation: d.op, Source: obj.URL, } log.Info(msg) diff --git a/e2e/mv_test.go b/e2e/mv_test.go index f581db090..3d26d76a8 100644 --- a/e2e/mv_test.go +++ b/e2e/mv_test.go @@ -311,3 +311,52 @@ func TestMoveMultipleS3ObjectsToS3(t *testing.T) { assert.Assert(t, ensureS3Object(s3client, bucket, "dst/"+filename, content)) } } + +// mv --dry-run s3://bucket/* s3://bucket2/prefix/ +func TestMoveDashDryRunMultipleS3ObjectsToS3(t *testing.T) { + t.Parallel() + + bucket := s3BucketFromTestName(t) + + s3client, s5cmd, cleanup := setup(t) + defer cleanup() + + createBucket(t, s3client, bucket) + + filesToContent := map[string]string{ + "testfile1.txt": "this is a test file 1", + "readme.md": "this is a readme file", + "filename-with-hypen.gz": "file has hypen in its name", + "another_test_file.txt": "yet another txt file. 
yatf.", + } + + for filename, content := range filesToContent { + putFile(t, s3client, bucket, filename, content) + } + + src := fmt.Sprintf("s3://%v/*", bucket) + dst := fmt.Sprintf("s3://%v/dst/", bucket) + + cmd := s5cmd("mv", "--dry-run", src, dst) + result := icmd.RunCmd(cmd) + + result.Assert(t, icmd.Success) + + assertLines(t, result.Stdout(), map[int]compareFunc{ + 0: equals("mv s3://%v/another_test_file.txt %vanother_test_file.txt", bucket, dst), + 1: equals("mv s3://%v/filename-with-hypen.gz %vfilename-with-hypen.gz", bucket, dst), + 2: equals("mv s3://%v/readme.md %vreadme.md", bucket, dst), + 3: equals("mv s3://%v/testfile1.txt %vtestfile1.txt", bucket, dst), + }, sortInput(true)) + + // expect no change on s3 source objects + for srcfile, content := range filesToContent { + assert.Assert(t, ensureS3Object(s3client, bucket, srcfile, content)) + } + + // assert no objects were copied to s3 destination + for filename, content := range filesToContent { + err := ensureS3Object(s3client, bucket, "dst/"+filename, content) + assertError(t, err, errS3NoSuchKey) + } +} diff --git a/e2e/rm_test.go b/e2e/rm_test.go index 1b803c812..5ed53eac0 100644 --- a/e2e/rm_test.go +++ b/e2e/rm_test.go @@ -126,6 +126,48 @@ func TestRemoveMultipleS3Objects(t *testing.T) { } } +// rm s3://bucket/* +func TestRemoveDashDryRunMultipleS3Objects(t *testing.T) { + t.Parallel() + + bucket := s3BucketFromTestName(t) + + s3client, s5cmd, cleanup := setup(t) + defer cleanup() + + createBucket(t, s3client, bucket) + + filesToContent := map[string]string{ + "testfile1.txt": "this is a test file 1", + "readme.md": "this is a readme file", + "filename-with-hypen.gz": "file has hypen in its name", + "another_test_file.txt": "yet another txt file. yatf.", + } + + for filename, content := range filesToContent { + putFile(t, s3client, bucket, filename, content) + } + + cmd := s5cmd("rm", "--dry-run", "s3://"+bucket+"/*") + result := icmd.RunCmd(cmd) + + result.Assert(t, icmd.Success) + + assertLines(t, result.Stderr(), map[int]compareFunc{}) + + assertLines(t, result.Stdout(), map[int]compareFunc{ + 0: equals(`rm s3://%v/another_test_file.txt`, bucket), + 1: equals(`rm s3://%v/filename-with-hypen.gz`, bucket), + 2: equals(`rm s3://%v/readme.md`, bucket), + 3: equals(`rm s3://%v/testfile1.txt`, bucket), + }, sortInput(true)) + + // assert s3 objects were not removed + for filename, content := range filesToContent { + assert.Assert(t, ensureS3Object(s3client, bucket, filename, content)) + } +} + // --json rm s3://bucket/* func TestRemoveMultipleS3ObjectsJSON(t *testing.T) { t.Parallel() From 4424758657bd8a93923129cdcc1c1ba87853c62a Mon Sep 17 00:00:00 2001 From: Farukhzhon Date: Tue, 28 Jul 2020 22:36:56 +0300 Subject: [PATCH 2/7] command/, readme: updates based on pr review --- README.md | 6 ++++++ command/cp.go | 2 +- command/mv.go | 2 +- command/rm.go | 2 +- command/run.go | 9 +++++++++ 5 files changed, 18 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index a787fc23c..453ca514f 100644 --- a/README.md +++ b/README.md @@ -37,6 +37,7 @@ storage services and local filesystems. - Wildcard support for all operations - Multiple arguments support for delete operation - Command file support to run commands in batches at very high execution speeds +- Dry run support for operations that have a side effect. 
- [S3 Transfer Acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) support - Google Cloud Storage (and any other S3 API compatible service) support - Structured logging for querying command outputs @@ -179,6 +180,11 @@ they'll be deleted in a single request. Will copy all the matching objects to the given S3 prefix, respecting the source folder hierarchy. +`--dry-run` flag can be set to observe what operations will be performed without +actually having `s5cmd` carry out those operations. + + s5cmd cp --dry-run cp s3://bucket/pre/* s3://another-bucket/ + ⚠️ Copying objects (from S3 to S3) larger than 5GB is not supported yet. We have an [open ticket](https://github.com/peak/s5cmd/issues/29) to track the issue. diff --git a/command/cp.go b/command/cp.go index f9c9af2d8..71950c71b 100644 --- a/command/cp.go +++ b/command/cp.go @@ -156,7 +156,7 @@ var copyCommand = &cli.Command{ ifSourceNewer: c.Bool("if-source-newer"), flatten: c.Bool("flatten"), followSymlinks: !c.Bool("no-follow-symlinks"), - dryRun: c.Bool("dry-run"), + dryRun: c.Bool("dry-run") || c.Bool("dry-run-all"), storageClass: storage.StorageClass(c.String("storage-class")), concurrency: c.Int("concurrency"), partSize: c.Int64("part-size") * megabytes, diff --git a/command/mv.go b/command/mv.go index 459f7ab81..aee4fd90a 100644 --- a/command/mv.go +++ b/command/mv.go @@ -52,7 +52,7 @@ var moveCommand = &cli.Command{ ifSizeDiffer: c.Bool("if-size-differ"), ifSourceNewer: c.Bool("if-source-newer"), flatten: c.Bool("flatten"), - dryRun: c.Bool("dry-run"), + dryRun: c.Bool("dry-run") || c.Bool("dry-run-all"), storageClass: storage.StorageClass(c.String("storage-class")), encryptionMethod: c.String("sse"), encryptionKeyID: c.String("sse-kms-key-id"), diff --git a/command/rm.go b/command/rm.go index 70517f1c9..4d22678f6 100644 --- a/command/rm.go +++ b/command/rm.go @@ -62,7 +62,7 @@ var deleteCommand = &cli.Command{ src: c.Args().Slice(), op: c.Command.Name, fullCommand: givenCommand(c), - dryRun: c.Bool("dry-run"), + dryRun: c.Bool("dry-run") || c.Bool("dry-run-all"), }.Run(c.Context) }, } diff --git a/command/run.go b/command/run.go index 3afaf03f0..dfa60799e 100644 --- a/command/run.go +++ b/command/run.go @@ -32,10 +32,19 @@ Examples: > cat commands.txt | s5cmd {{.HelpName}} ` +var runCommandFlags = []cli.Flag{ + &cli.BoolFlag{ + Name: "dry-run-all", + Aliases: []string{"dry-run"}, + Usage: "check what s5cmd will do, without making it perform any operations with side effects", + }, +} + var runCommand = &cli.Command{ Name: "run", HelpName: "run", Usage: "run commands in batch", + Flags: runCommandFlags, CustomHelpTemplate: runHelpTemplate, Before: func(c *cli.Context) error { if c.Args().Len() > 1 { From 5f4463accc9a210a6139508430e0b52794d74b01 Mon Sep 17 00:00:00 2001 From: Farukhzhon Date: Wed, 29 Jul 2020 15:48:40 +0300 Subject: [PATCH 3/7] e2e/run_test, changelog: updates based on pr review --- CHANGELOG.md | 2 +- e2e/run_test.go | 50 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 95e0bcdef..b6bb7a44d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,7 @@ # Changelog #### Features -- Added `--dry-run` option for copy, move, and remove operations. It displays which commands will be executed without actually having a side effect. ([#90](https://github.com/peak/s5cmd/issues/90)) +- Added `--dry-run` option for run, copy, move and remove operations. 
It displays which commands will be executed without actually having a side effect. ([#90](https://github.com/peak/s5cmd/issues/90)) ## v1.1.0 - 22 Jul 2020 diff --git a/e2e/run_test.go b/e2e/run_test.go index 750deb505..f6c82f080 100644 --- a/e2e/run_test.go +++ b/e2e/run_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "gotest.tools/v3/assert" "gotest.tools/v3/fs" "gotest.tools/v3/icmd" ) @@ -232,3 +233,52 @@ func TestRunSpecialCharactersInPrefix(t *testing.T) { assertLines(t, result.Stderr(), map[int]compareFunc{}) } + +func TestRunDryRunAll(t *testing.T) { + t.Parallel() + + bucket := s3BucketFromTestName(t) + + s3client, s5cmd, cleanup := setup(t) + defer cleanup() + + createBucket(t, s3client, bucket) + + files := [...]string{"cpfile.txt", "mvfile.txt", "rmfile.txt"} + for _, f := range files { + putFile(t, s3client, bucket, f, "content") + } + + filecontent := []string{ + fmt.Sprintf("cp s3://%v/%s s3://%v/cp_%s", bucket, files[0], bucket, files[0]), + fmt.Sprintf("mv s3://%v/%s s3://%v/mv_%s", bucket, files[1], bucket, files[1]), + fmt.Sprintf("rm s3://%v/%s", bucket, files[2]), + } + + file := fs.NewFile(t, "prefix", fs.WithContent(strings.Join(filecontent, "\n"))) + defer file.Remove() + + cmd := s5cmd("run", "--dry-run", file.Path()) + result := icmd.RunCmd(cmd) + + result.Assert(t, icmd.Success) + + assertLines(t, result.Stdout(), map[int]compareFunc{ + 0: equals(filecontent[0]), + 1: equals(filecontent[1]), + 2: equals(filecontent[2]), + }, sortInput(true)) + + // ensure no side effect for copy operation + err := ensureS3Object(s3client, bucket, "cp_"+files[0], "content") + assertError(t, err, errS3NoSuchKey) + + // ensure no side effect for move operation + assert.Assert(t, ensureS3Object(s3client, bucket, files[1], "content")) + + err = ensureS3Object(s3client, bucket, "mv_"+files[1], "content") + assertError(t, err, errS3NoSuchKey) + + // ensure no side effect for remove operation + assert.Assert(t, ensureS3Object(s3client, bucket, files[2], "content")) +} From 38e0d11bca24389541181975ff36320e9e4035e0 Mon Sep 17 00:00:00 2001 From: Farukhzhon Date: Tue, 4 Aug 2020 17:13:31 +0300 Subject: [PATCH 4/7] command/cp: updates based on pr review --- command/cp.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/command/cp.go b/command/cp.go index 71950c71b..eee1deb4a 100644 --- a/command/cp.go +++ b/command/cp.go @@ -71,8 +71,8 @@ Examples: 12. Perform KMS-SSE of the object(s) at the destination using customer managed Customer Master Key (CMK) key id > s5cmd {{.HelpName}} -sse aws:kms -sse-kms-key-id s3://bucket/object s3://target-bucket/prefix/object - 13. Check what s5cmd will do, without actually doing so (check mode option) - > s5cmd {{.HelpName}} -dry-run dir/ s3://bucket/ + 13. 
Check what s5cmd will do, without actually doing so + > s5cmd {{.HelpName}} --dry-run dir/ s3://bucket/ ` var copyCommandFlags = []cli.Flag{ From ddc285af921097649031df93a004401a0522ef04 Mon Sep 17 00:00:00 2001 From: Farukhzhon Date: Sun, 9 Aug 2020 02:53:26 +0300 Subject: [PATCH 5/7] command/cp: updates based on pr review --- command/cp.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/command/cp.go b/command/cp.go index eee1deb4a..6a3e102f3 100644 --- a/command/cp.go +++ b/command/cp.go @@ -310,7 +310,7 @@ func (c Copy) prepareDownloadTask( isBatch bool, ) func() error { return func() error { - dsturl, err := prepareLocalDestination(ctx, srcurl, dsturl, c.flatten, isBatch) + dsturl, err := prepareLocalDestination(ctx, srcurl, dsturl, c.flatten, isBatch, c.dryRun) if err != nil { return err } @@ -582,13 +582,14 @@ func prepareLocalDestination( dsturl *url.URL, flatten bool, isBatch bool, + dryRun bool, ) (*url.URL, error) { objname := srcurl.Base() if isBatch && !flatten { objname = srcurl.Relative() } - if isBatch { + if isBatch && !dryRun { if err := os.MkdirAll(dsturl.Absolute(), os.ModePerm); err != nil { return nil, err } @@ -603,15 +604,20 @@ func prepareLocalDestination( if isBatch && !flatten { dsturl = dsturl.Join(objname) - if err := os.MkdirAll(dsturl.Dir(), os.ModePerm); err != nil { + if dryRun { + // dryRun => no side effects. + } else if err := os.MkdirAll(dsturl.Dir(), os.ModePerm); err != nil { return nil, err } } if err == storage.ErrGivenObjectNotFound { - if err := os.MkdirAll(dsturl.Dir(), os.ModePerm); err != nil { + if dryRun { + // dryRun => no side effects. + } else if err := os.MkdirAll(dsturl.Dir(), os.ModePerm); err != nil { return nil, err } + if strings.HasSuffix(dsturl.Absolute(), "/") { dsturl = dsturl.Join(objname) } From d4008a94b763583831fd0639a6e3bb6c3e3312b8 Mon Sep 17 00:00:00 2001 From: Farukhzhon Date: Mon, 10 Aug 2020 00:45:01 +0300 Subject: [PATCH 6/7] e2e/cp_test: updates based on pr review --- e2e/cp_test.go | 75 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/e2e/cp_test.go b/e2e/cp_test.go index a7d06a4b0..9387505ce 100644 --- a/e2e/cp_test.go +++ b/e2e/cp_test.go @@ -24,6 +24,7 @@ package e2e import ( "fmt" + "os" "path/filepath" "runtime" "testing" @@ -2701,3 +2702,77 @@ func TestCopyWithNoFollowSymlink(t *testing.T) { // assert s3 objects assert.Assert(t, ensureS3Object(s3client, bucket, "prefix/a/f1.txt", fileContent)) } + +// cp --dry-run dir/ s3://bucket/ +func TestCopyDashDryRunDirToS3(t *testing.T) { + t.Parallel() + + bucket := s3BucketFromTestName(t) + + s3client, s5cmd, cleanup := setup(t) + defer cleanup() + + createBucket(t, s3client, bucket) + + folderLayout := []fs.PathOp{ + fs.WithFile("file1.txt", "content"), + fs.WithDir( + "c", + fs.WithFile("file2.txt", "content"), + ), + } + + workdir := fs.NewDir(t, t.Name(), folderLayout...) 
+ defer workdir.Remove() + + srcpath := filepath.ToSlash(workdir.Path()) + dstpath := fmt.Sprintf("s3://%v/", bucket) + + cmd := s5cmd("cp", "--dry-run", workdir.Path()+"/", dstpath) + result := icmd.RunCmd(cmd) + + result.Assert(t, icmd.Success) + + assertLines(t, result.Stdout(), map[int]compareFunc{ + 0: equals(`cp %v/c/file2.txt %vc/file2.txt`, srcpath, dstpath), + 1: equals(`cp %v/file1.txt %vfile1.txt`, srcpath, dstpath), + }, sortInput(true)) + + // assert no change in s3 + objs := []string{"c/file2.txt", "file1.txt"} + for _, obj := range objs { + err := ensureS3Object(s3client, bucket, obj, "content") + assertError(t, err, errS3NoSuchKey) + } +} + +// cp --dry-run s3://bucket/* dir/ +func TestCopyDashDryRunS3ToDir(t *testing.T) { + t.Parallel() + + bucket := s3BucketFromTestName(t) + + s3client, s5cmd, cleanup := setup(t) + defer cleanup() + + createBucket(t, s3client, bucket) + + putFile(t, s3client, bucket, "file1.txt", "content") + putFile(t, s3client, bucket, "c/file2.txt", "content") + + srcpath := fmt.Sprintf("s3://%s", bucket) + + cmd := s5cmd("cp", "--dry-run", srcpath+"/*", "dir/") + result := icmd.RunCmd(cmd) + + result.Assert(t, icmd.Success) + + assertLines(t, result.Stdout(), map[int]compareFunc{ + 0: equals("cp %v/c/file2.txt dir/c/file2.txt", srcpath), + 1: equals("cp %v/file1.txt dir/file1.txt", srcpath), + }, sortInput(true)) + + // not even outermost directory should be created + _, err := os.Stat(cmd.Dir + "/dir") + assert.Assert(t, os.IsNotExist(err)) +} From dfa49b9eab5563e74c2b4a18d07f7727317274d1 Mon Sep 17 00:00:00 2001 From: Farukhzhon Date: Fri, 14 Aug 2020 15:58:49 +0300 Subject: [PATCH 7/7] command/rm, e2e: updates based on pr review --- command/rm.go | 2 +- e2e/cp_test.go | 19 ++++++++-- e2e/run_test.go | 98 ++++++++++++++++++++++++++++++++++++------------- 3 files changed, 89 insertions(+), 30 deletions(-) diff --git a/command/rm.go b/command/rm.go index b80c94786..2c6a1e6b8 100644 --- a/command/rm.go +++ b/command/rm.go @@ -35,7 +35,7 @@ Examples: 4. Delete all matching objects and a specific object > s5cmd {{.HelpName}} s3://bucketname/prefix/* s3://bucketname/object1.gz - 5. Check what s5cmd will do, without actually doing so (check mode option) + 5. Check what s5cmd will do, without actually doing so > s5cmd {{.HelpName}} --dry-run s3://bucket/prefix/* ` diff --git a/e2e/cp_test.go b/e2e/cp_test.go index 9387505ce..f4600a49b 100644 --- a/e2e/cp_test.go +++ b/e2e/cp_test.go @@ -2744,6 +2744,10 @@ func TestCopyDashDryRunDirToS3(t *testing.T) { err := ensureS3Object(s3client, bucket, obj, "content") assertError(t, err, errS3NoSuchKey) } + + // assert local filesystem + expected := fs.Expected(t, folderLayout...) 
+ assert.Assert(t, fs.Equal(workdir.Path(), expected)) } // cp --dry-run s3://bucket/* dir/ @@ -2757,8 +2761,10 @@ func TestCopyDashDryRunS3ToDir(t *testing.T) { createBucket(t, s3client, bucket) - putFile(t, s3client, bucket, "file1.txt", "content") - putFile(t, s3client, bucket, "c/file2.txt", "content") + files := [...]string{"c/file2.txt", "file1.txt"} + + putFile(t, s3client, bucket, files[0], "content") + putFile(t, s3client, bucket, files[1], "content") srcpath := fmt.Sprintf("s3://%s", bucket) @@ -2768,11 +2774,16 @@ func TestCopyDashDryRunS3ToDir(t *testing.T) { result.Assert(t, icmd.Success) assertLines(t, result.Stdout(), map[int]compareFunc{ - 0: equals("cp %v/c/file2.txt dir/c/file2.txt", srcpath), - 1: equals("cp %v/file1.txt dir/file1.txt", srcpath), + 0: equals("cp %v/c/file2.txt dir/%s", srcpath, files[0]), + 1: equals("cp %v/file1.txt dir/%s", srcpath, files[1]), }, sortInput(true)) // not even outermost directory should be created _, err := os.Stat(cmd.Dir + "/dir") assert.Assert(t, os.IsNotExist(err)) + + // assert s3 + for _, f := range files { + assert.Assert(t, ensureS3Object(s3client, bucket, f, "content")) + } } diff --git a/e2e/run_test.go b/e2e/run_test.go index f6c82f080..a541e8c3a 100644 --- a/e2e/run_test.go +++ b/e2e/run_test.go @@ -234,9 +234,14 @@ func TestRunSpecialCharactersInPrefix(t *testing.T) { assertLines(t, result.Stderr(), map[int]compareFunc{}) } -func TestRunDryRunAll(t *testing.T) { +func TestRunDryRun(t *testing.T) { t.Parallel() + const ( + content = "content" + pre = "prefix" + ) + bucket := s3BucketFromTestName(t) s3client, s5cmd, cleanup := setup(t) @@ -246,39 +251,82 @@ func TestRunDryRunAll(t *testing.T) { files := [...]string{"cpfile.txt", "mvfile.txt", "rmfile.txt"} for _, f := range files { - putFile(t, s3client, bucket, f, "content") + putFile(t, s3client, bucket, f, content) } - filecontent := []string{ - fmt.Sprintf("cp s3://%v/%s s3://%v/cp_%s", bucket, files[0], bucket, files[0]), - fmt.Sprintf("mv s3://%v/%s s3://%v/mv_%s", bucket, files[1], bucket, files[1]), - fmt.Sprintf("rm s3://%v/%s", bucket, files[2]), + testcases := []struct { + filecontent []string + expectedOut map[int]compareFunc // if different output is expected than the command itself + + runFlag string + }{ + { + filecontent: []string{ + fmt.Sprintf("cp s3://%v/%s s3://%v/%s", bucket, files[0], bucket, pre+files[0]), + fmt.Sprintf("mv s3://%v/%s s3://%v/%s", bucket, files[1], bucket, pre+files[1]), + fmt.Sprintf("rm s3://%v/%s", bucket, files[2]), + }, + runFlag: "--dry-run-all", + }, + { + filecontent: []string{ + fmt.Sprintf("cp s3://%v/%s s3://%v/%s", bucket, files[0], bucket, pre+files[0]), + fmt.Sprintf("mv s3://%v/%s s3://%v/%s", bucket, files[1], bucket, pre+files[1]), + fmt.Sprintf("rm s3://%v/%s", bucket, files[2]), + }, + runFlag: "--dry-run", + }, + { + filecontent: []string{ + fmt.Sprintf("cp --dry-run s3://%v/%s s3://%v/%s", bucket, files[0], bucket, pre+files[0]), + }, + expectedOut: map[int]compareFunc{ + 0: equals(fmt.Sprintf("cp s3://%v/%s s3://%v/%s", bucket, files[0], bucket, pre+files[0])), + }, + runFlag: "--dry-run-all", + }, + { + filecontent: []string{ + fmt.Sprintf("cp --dry-run s3://%v/%s s3://%v/%s", bucket, files[0], bucket, pre+files[0]), + }, + expectedOut: map[int]compareFunc{ + 0: equals(fmt.Sprintf("cp s3://%v/%s s3://%v/%s", bucket, files[0], bucket, pre+files[0])), + }, + runFlag: "--dry-run", + }, } - file := fs.NewFile(t, "prefix", fs.WithContent(strings.Join(filecontent, "\n"))) - defer file.Remove() + for _, tc := range 
testcases { + tc := tc + file := fs.NewFile(t, "prefix", fs.WithContent(strings.Join(tc.filecontent, "\n"))) - cmd := s5cmd("run", "--dry-run", file.Path()) - result := icmd.RunCmd(cmd) + cmd := s5cmd("run", tc.runFlag, file.Path()) + result := icmd.RunCmd(cmd) - result.Assert(t, icmd.Success) + result.Assert(t, icmd.Success) - assertLines(t, result.Stdout(), map[int]compareFunc{ - 0: equals(filecontent[0]), - 1: equals(filecontent[1]), - 2: equals(filecontent[2]), - }, sortInput(true)) + expectedLines := map[int]compareFunc{} + for i, c := range tc.filecontent { + expectedLines[i] = equals(c) - // ensure no side effect for copy operation - err := ensureS3Object(s3client, bucket, "cp_"+files[0], "content") - assertError(t, err, errS3NoSuchKey) + // ensure no copy to destination + if strings.HasPrefix(c, "cp") || strings.HasPrefix(c, "mv") { + err := ensureS3Object(s3client, bucket, pre+files[i], content) + assertError(t, err, errS3NoSuchKey) + } - // ensure no side effect for move operation - assert.Assert(t, ensureS3Object(s3client, bucket, files[1], "content")) + // ensure no delete + if strings.HasPrefix(c, "rm") || strings.HasPrefix(c, "mv") { + assert.Assert(t, ensureS3Object(s3client, bucket, files[i], content)) + } + } - err = ensureS3Object(s3client, bucket, "mv_"+files[1], "content") - assertError(t, err, errS3NoSuchKey) + if tc.expectedOut != nil { + assertLines(t, result.Stdout(), tc.expectedOut, sortInput(true)) + } else { + assertLines(t, result.Stdout(), expectedLines, sortInput(true)) + } - // ensure no side effect for remove operation - assert.Assert(t, ensureS3Object(s3client, bucket, files[2], "content")) + file.Remove() + } }
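
A minimal end-to-end sketch of the behaviour this series adds, assuming a hypothetical commands.txt and bucket name (the echoed lines follow the same "operation source [destination]" format asserted in the e2e tests above):

    > cat commands.txt
    cp s3://bucket/cpfile.txt s3://bucket/prefix/cpfile.txt
    mv s3://bucket/mvfile.txt s3://bucket/prefix/mvfile.txt
    rm s3://bucket/rmfile.txt

    > s5cmd run --dry-run commands.txt
    cp s3://bucket/cpfile.txt s3://bucket/prefix/cpfile.txt
    mv s3://bucket/mvfile.txt s3://bucket/prefix/mvfile.txt
    rm s3://bucket/rmfile.txt

Every command is printed, but no object is copied, moved, or removed; the same applies when `--dry-run` is passed directly to `cp`, `mv`, or `rm`.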