From fe58afcd39a70d7013d0ff899e34f32b6e86441c Mon Sep 17 00:00:00 2001 From: Sveinn Date: Mon, 15 Apr 2024 10:10:25 -0500 Subject: [PATCH] Large MC Update (new encryption flags, functional test suite, removal of session code, minor cleanup, vuln. updates ) (#4882) --- .github/workflows/go.yml | 3 + Makefile | 4 +- README.md | 84 +- cmd/admin-kms-key-create.go | 2 +- cmd/cat-main.go | 13 +- cmd/client-fs_linux.go | 6 +- cmd/client-s3.go | 2 + cmd/common-methods.go | 62 - cmd/common-methods_test.go | 71 - cmd/cp-main.go | 344 +-- cmd/diff-main.go | 2 +- cmd/du-main.go | 14 +- cmd/encryption-methods.go | 252 ++ cmd/encryption-methods_test.go | 134 + cmd/error.go | 17 + cmd/find-main.go | 2 +- cmd/flags.go | 34 +- cmd/get-main.go | 31 +- cmd/head-main.go | 23 +- cmd/ilm-restore.go | 6 +- cmd/main.go | 10 +- cmd/mirror-main.go | 24 +- cmd/mv-main.go | 110 +- cmd/pipe-main.go | 25 +- cmd/put-main.go | 33 +- cmd/rm-main.go | 31 +- cmd/scan-bar.go | 58 - cmd/session-migrate.go | 168 -- cmd/session-old.go | 149 -- cmd/session-v8.go | 400 --- cmd/session.go | 134 - cmd/session_test.go | 73 - cmd/share-download-main.go | 4 +- cmd/share-list-main.go | 2 +- cmd/share-upload-main.go | 2 +- cmd/share.go | 6 +- cmd/sql-main.go | 8 +- cmd/stat-main.go | 17 +- cmd/suite_test.go | 2887 ++++++++++++++++++++++ cmd/typed-errors.go | 45 +- cmd/utils.go | 104 - cmd/utils_test.go | 79 - docs/LICENSE | 395 --- docs/MAINTAINERS.md | 44 - docs/minio-admin-complete-guide.md | 1175 --------- docs/minio-client-complete-guide.md | 2305 ----------------- docs/minio-client-configuration-files.md | 92 - go.mod | 53 +- go.sum | 103 +- 49 files changed, 3673 insertions(+), 5969 deletions(-) delete mode 100644 cmd/common-methods_test.go create mode 100644 cmd/encryption-methods.go create mode 100644 cmd/encryption-methods_test.go delete mode 100644 cmd/scan-bar.go delete mode 100644 cmd/session-migrate.go delete mode 100644 cmd/session-old.go delete mode 100644 cmd/session-v8.go delete mode 100644 cmd/session.go delete mode 100644 cmd/session_test.go create mode 100644 cmd/suite_test.go delete mode 100644 docs/LICENSE delete mode 100644 docs/MAINTAINERS.md delete mode 100644 docs/minio-admin-complete-guide.md delete mode 100644 docs/minio-client-complete-guide.md delete mode 100644 docs/minio-client-configuration-files.md diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index e457b77de0..3b8b66eed3 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -49,6 +49,9 @@ jobs: ENABLE_HTTPS: 1 MINIO_CI_CD: 1 SERVER_ENDPOINT: localhost:9000 + MC_TEST_ENABLE_HTTPS: true + MC_TEST_SKIP_INSECURE: true + MC_TEST_SKIP_BUILD: true run: | wget https://dl.min.io/server/minio/release/linux-amd64/minio && chmod +x minio mkdir -p ~/.minio/certs/ && cp testdata/localhost.crt ~/.minio/certs/public.crt && cp testdata/localhost.key ~/.minio/certs/private.key diff --git a/Makefile b/Makefile index 62d728145d..5d2500ae26 100644 --- a/Makefile +++ b/Makefile @@ -43,7 +43,7 @@ test: verifiers build @echo "Running unit tests" @GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null @echo "Running functional tests" - @(env bash $(PWD)/functional-tests.sh) + @GO111MODULE=on MC_TEST_RUN_FULL_SUITE=true go test -race -v --timeout 20m ./... 
-run Test_FullSuite test-race: verifiers build @echo "Running unit tests under -race" @@ -54,7 +54,7 @@ verify: @echo "Verifying build with race" @GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/mc 1>/dev/null @echo "Running functional tests" - @(env bash $(PWD)/functional-tests.sh) + @GO111MODULE=on MC_TEST_RUN_FULL_SUITE=true go test -race -v --timeout 20m ./... -run Test_FullSuite # Builds mc locally. build: checks diff --git a/README.md b/README.md index 2212527855..7eb5cc5ff3 100644 --- a/README.md +++ b/README.md @@ -1,41 +1,53 @@ # MinIO Client Quickstart Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Go Report Card](https://goreportcard.com/badge/minio/mc)](https://goreportcard.com/report/minio/mc) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/mc.svg?maxAge=604800)](https://hub.docker.com/r/minio/mc/) [![license](https://img.shields.io/badge/license-AGPL%20V3-blue)](https://github.com/minio/mc/blob/master/LICENSE) +# Documentation +- [MC documentation](https://min.io/docs/minio/linux/reference/minio-mc.html) + MinIO Client (mc) provides a modern alternative to UNIX commands like ls, cat, cp, mirror, diff, find etc. It supports filesystems and Amazon S3 compatible cloud storage service (AWS Signature v2 and v4). ``` -alias set, remove and list aliases in configuration file -ls list buckets and objects -mb make a bucket -rb remove a bucket -cp copy objects -mirror synchronize object(s) to a remote site -cat display object contents -head display first 'n' lines of an object -pipe stream STDIN to an object -share generate URL for temporary access to an object -find search for objects -sql run sql queries on objects -stat show object metadata -mv move objects -tree list buckets and objects in a tree format -du summarize disk usage recursively -retention set retention for object(s) -legalhold set legal hold for object(s) -diff list differences in object name, size, and date between two buckets -rm remove objects -encrypt manage bucket encryption config -event manage object notifications -watch listen for object notification events -undo undo PUT/DELETE operations -anonymous manage anonymous access to buckets and objects -tag manage tags for bucket(s) and object(s) -ilm manage bucket lifecycle -version manage bucket versioning -replicate configure server side bucket replication -admin manage MinIO servers -update update mc to latest release -ping perform liveness check + alias manage server credentials in configuration file + admin manage MinIO servers + anonymous manage anonymous access to buckets and objects + batch manage batch jobs + cp copy objects + cat display object contents + diff list differences in object name, size, and date between two buckets + du summarize disk usage recursively + encrypt manage bucket encryption config + event manage object notifications + find search for objects + get get s3 object to local + head display first 'n' lines of an object + ilm manage bucket lifecycle + idp manage MinIO IDentity Provider server configuration + license license related commands + legalhold manage legal hold for object(s) + ls list buckets and objects + mb make a bucket + mv move objects + mirror synchronize object(s) to a remote site + od measure single stream upload and download + ping perform liveness check + pipe stream STDIN to an object + put upload an object to a bucket + quota manage bucket quota + rm remove object(s) + retention set retention for object(s) + rb remove a 
bucket + replicate configure server side bucket replication + ready checks if the cluster is ready or not + sql run sql queries on objects + stat show object metadata + support support related commands + share generate URL for temporary access to an object + tree list buckets and objects in a tree format + tag manage tags for bucket and object(s) + undo undo PUT/DELETE operations + update update mc to latest release + version manage bucket versioning + watch listen for object notification events ``` ## Docker Container @@ -176,9 +188,6 @@ Get your AccessKeyID and SecretAccessKey by following [Google Credentials Guide] mc alias set gcs https://storage.googleapis.com BKIKJAA5BMMU2RHO6IBB V8f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12 ``` -### Example - IBM Cloud Object Storage -See [the complete guide](docs/minio-client-complete-guide.md) for IBM instructions. - ## Test Your Setup `mc` is pre-configured with https://play.min.io, aliased as "play". It is a hosted MinIO server for testing and development purpose. To test Amazon S3, simply replace "play" with "s3" or the alias you used at the time of setup. @@ -236,11 +245,6 @@ admin config diff find ls mirror policy session sql cat cp event head mb pipe rm share stat version ``` -## Explore Further -- [MinIO Client Complete Guide](https://min.io/docs/minio/linux/reference/minio-mc.html?ref=gh) -- [MinIO Quickstart Guide](https://min.io/docs/minio/linux/index.html#quickstart-for-linux?ref=gh) -- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html?ref=gh) - ## Contribute to MinIO Project Please follow MinIO [Contributor's Guide](https://github.com/minio/mc/blob/master/CONTRIBUTING.md) diff --git a/cmd/admin-kms-key-create.go b/cmd/admin-kms-key-create.go index cc9735b8be..19f3731807 100644 --- a/cmd/admin-kms-key-create.go +++ b/cmd/admin-kms-key-create.go @@ -30,7 +30,7 @@ import ( var adminKMSCreateKeyCmd = cli.Command{ Name: "create", - Usage: "creates a new master key at the KMS", + Usage: "creates a new master KMS key", Action: mainAdminKMSCreateKey, OnUsageError: onUsageError, Before: setGlobalsFromContext, diff --git a/cmd/cat-main.go b/cmd/cat-main.go index 131c86820c..b0268360b8 100644 --- a/cmd/cat-main.go +++ b/cmd/cat-main.go @@ -64,7 +64,7 @@ var catCmd = cli.Command{ Action: mainCat, OnUsageError: onUsageError, Before: setGlobalsFromContext, - Flags: append(append(catFlags, ioFlags...), globalFlags...), + Flags: append(append(catFlags, encCFlag), globalFlags...), CustomHelpTemplate: `NAME: {{.HelpName}} - {{.Usage}} @@ -74,8 +74,6 @@ USAGE: FLAGS: {{range .VisibleFlags}}{{.}} {{end}} -ENVIRONMENT VARIABLES: - MC_ENCRYPT_KEY: list of comma delimited prefix=secret values EXAMPLES: 1. Stream an object from Amazon S3 cloud storage to mplayer standard input. @@ -88,11 +86,11 @@ EXAMPLES: {{.Prompt}} {{.HelpName}} part.* > complete.img 4. Save an encrypted object from Amazon S3 cloud storage to a local file. - {{.Prompt}} {{.HelpName}} --encrypt-key 's3/mysql-backups=32byteslongsecretkeymustbegiven1' s3/mysql-backups/backups-201810.gz > /mnt/data/recent.gz + {{.Prompt}} {{.HelpName}} --enc-c "play/my-bucket/=MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDA" s3/mysql-backups/backups-201810.gz > /mnt/data/recent.gz 5. Display the content of encrypted object. In case the encryption key contains non-printable character like tab, pass the base64 encoded string as key. 
- {{.Prompt}} {{.HelpName}} --encrypt-key "play/my-bucket/=MzJieXRlc2xvbmdzZWNyZXRrZQltdXN0YmVnaXZlbjE=" play/my-bucket/my-object + {{.Prompt}} {{.HelpName}} --enc-c "play/my-bucket/=MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDA" play/my-bucket/my-object 6. Display the content of an object 10 days earlier {{.Prompt}} {{.HelpName}} --rewind 10d play/my-bucket/my-object @@ -323,15 +321,12 @@ func mainCat(cliCtx *cli.Context) error { ctx, cancelCat := context.WithCancel(globalContext) defer cancelCat() - // Parse encryption keys per command. - encKeyDB, err := getEncKeys(cliCtx) + encKeyDB, err := validateAndCreateEncryptionKeys(cliCtx) fatalIf(err, "Unable to parse encryption keys.") // check 'cat' cli arguments. o := parseCatSyntax(cliCtx) - // Set command flags from context. - // handle std input data. if o.stdinMode { fatalIf(catOut(os.Stdin, -1).Trace(), "Unable to read from standard input.") diff --git a/cmd/client-fs_linux.go b/cmd/client-fs_linux.go index f5de5a48c4..41ffe3f05b 100644 --- a/cmd/client-fs_linux.go +++ b/cmd/client-fs_linux.go @@ -68,8 +68,7 @@ func IsDeleteEvent(event notify.Event) bool { return false } -// getXAttr fetches the extended attribute for a particular key on -// file +// getXAttr fetches the extended attribute for a particular key on file func getXAttr(path, key string) (string, error) { data, e := xattr.Get(path, key) if e != nil { @@ -81,8 +80,7 @@ func getXAttr(path, key string) (string, error) { return hex.EncodeToString(data), nil } -// getAllXattrs returns the extended attributes for a file if supported -// by the OS +// getAllXattrs returns the extended attributes for a file if supported by the OS func getAllXattrs(path string) (map[string]string, error) { xMetadata := make(map[string]string) list, e := xattr.List(path) diff --git a/cmd/client-s3.go b/cmd/client-s3.go index ad6a51c39e..139c18590b 100644 --- a/cmd/client-s3.go +++ b/cmd/client-s3.go @@ -92,6 +92,8 @@ const ( AmzObjectLockRetainUntilDate = "X-Amz-Object-Lock-Retain-Until-Date" // AmzObjectLockLegalHold sets object lock legal hold AmzObjectLockLegalHold = "X-Amz-Object-Lock-Legal-Hold" + amzObjectSSEKMSKeyID = "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" + amzObjectSSE = "X-Amz-Server-Side-Encryption" ) type dialContext func(ctx context.Context, network, addr string) (net.Conn, error) diff --git a/cmd/common-methods.go b/cmd/common-methods.go index 84e826b5c2..5a789f141d 100644 --- a/cmd/common-methods.go +++ b/cmd/common-methods.go @@ -19,7 +19,6 @@ package cmd import ( "context" - "encoding/base64" "errors" "io" "net/http" @@ -33,73 +32,12 @@ import ( "golang.org/x/net/http/httpguts" "github.com/dustin/go-humanize" - "github.com/minio/cli" "github.com/minio/mc/pkg/probe" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/encrypt" "github.com/minio/pkg/v2/env" ) -// decode if the key is encoded key and returns the key -func getDecodedKey(sseKeys string) (key string, err *probe.Error) { - keyString := "" - for i, sse := range strings.Split(sseKeys, ",") { - if i > 0 { - keyString = keyString + "," - } - sseString, err := parseKey(sse) - if err != nil { - return "", err - } - keyString = keyString + sseString - } - return keyString, nil -} - -// Validate the key -func parseKey(sseKeys string) (sse string, err *probe.Error) { - encryptString := strings.SplitN(sseKeys, "=", 2) - if len(encryptString) < 2 { - return "", probe.NewError(errors.New("SSE-C prefix should be of the form prefix1=key1,... 
")) - } - - secretValue := encryptString[1] - if len(secretValue) == 32 { - return sseKeys, nil - } - decodedString, e := base64.StdEncoding.DecodeString(secretValue) - if e != nil || len(decodedString) != 32 { - return "", probe.NewError(errors.New("Encryption key should be 32 bytes plain text key or 44 bytes base64 encoded key")) - } - return encryptString[0] + "=" + string(decodedString), nil -} - -// parse and return encryption key pairs per alias. -func getEncKeys(ctx *cli.Context) (map[string][]prefixSSEPair, *probe.Error) { - sseServer := ctx.String("encrypt") - var sseKeys string - if keyPrefix := ctx.String("encrypt-key"); keyPrefix != "" { - if sseServer != "" && strings.Contains(keyPrefix, sseServer) { - return nil, errConflictSSE(sseServer, keyPrefix).Trace(ctx.Args()...) - } - sseKeys = keyPrefix - } - var err *probe.Error - if sseKeys != "" { - sseKeys, err = getDecodedKey(sseKeys) - if err != nil { - return nil, err.Trace(sseKeys) - } - } - - encKeyDB, err := parseAndValidateEncryptionKeys(sseKeys, sseServer) - if err != nil { - return nil, err.Trace(sseKeys) - } - - return encKeyDB, nil -} - // Check if the passed URL represents a folder. It may or may not exist yet. // If it exists, we can easily check if it is a folder, if it doesn't exist, // we can guess if the url is a folder from how it looks. diff --git a/cmd/common-methods_test.go b/cmd/common-methods_test.go deleted file mode 100644 index 2e04bd7792..0000000000 --- a/cmd/common-methods_test.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) 2015-2022 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . 
- -package cmd - -import ( - "errors" - "reflect" - "testing" -) - -func TestGetDecodedKey(t *testing.T) { - getDecodeCases := []struct { - input string - output string - err error - status bool - }{ - // success scenario the key contains non printable (tab) character as key - {"s3/documents/=MzJieXRlc2xvbmdzZWNyZWFiY2RlZmcJZ2l2ZW5uMjE=", "s3/documents/=32byteslongsecreabcdefg givenn21", nil, true}, - // success scenario the key contains non printable (tab character) as key - {"s3/documents/=MzJieXRlc2xvbmdzZWNyZWFiY2RlZmcJZ2l2ZW5uMjE=,play/documents/=MzJieXRlc2xvbmdzZWNyZXRrZQltdXN0YmVnaXZlbjE=", "s3/documents/=32byteslongsecreabcdefg givenn21,play/documents/=32byteslongsecretke mustbegiven1", nil, true}, - // success scenario using a normal string - {"s3/documents/=32byteslongsecretkeymustbegiven1", "s3/documents/=32byteslongsecretkeymustbegiven1", nil, true}, - // success scenario using a normal string - {"s3/documents/=32byteslongsecretkeymustbegiven1,myminio/documents/=32byteslongsecretkeymustbegiven2", "s3/documents/=32byteslongsecretkeymustbegiven1,myminio/documents/=32byteslongsecretkeymustbegiven2", nil, true}, - // success scenario using a mix of normal string and encoded string - {"s3/documents/=MzJieXRlc2xvbmdzZWNyZWFiY2RlZmcJZ2l2ZW5uMjE=,play/documents/=32byteslongsecretkeymustbegiven1", "s3/documents/=32byteslongsecreabcdefg givenn21,play/documents/=32byteslongsecretkeymustbegiven1", nil, true}, - // success scenario using a mix of normal string and encoded string - {"play/documents/=32byteslongsecretkeymustbegiven1,s3/documents/=MzJieXRlc2xvbmdzZWNyZWFiY2RlZmcJZ2l2ZW5uMjE=", "play/documents/=32byteslongsecretkeymustbegiven1,s3/documents/=32byteslongsecreabcdefg givenn21", nil, true}, - // decoded key less than 32 char and conatin non printable (tab) character - {"s3/documents/=MzJieXRlc2xvbmdzZWNyZWFiY2RlZmcJZ2l2ZW5uMjE", "", errors.New("Encryption key should be 32 bytes plain text key or 44 bytes base64 encoded key"), false}, - // normal key less than 32 character - {"s3/documents/=32byteslongsecretkeymustbegiven", "", errors.New("Encryption key should be 32 bytes plain text key or 44 bytes base64 encoded key"), false}, - } - - for idx, testCase := range getDecodeCases { - decodedString, errDecode := getDecodedKey(testCase.input) - if testCase.status == true { - if errDecode != nil { - t.Fatalf("Test %d: generated error not matching, expected = `%s`, found = `%s`", idx+1, testCase.err, errDecode) - } - if !reflect.DeepEqual(decodedString, testCase.output) { - t.Fatalf("Test %d: generated key not matching, expected = `%s`, found = `%s`", idx+1, testCase.input, decodedString) - } - } - - if testCase.status == false { - if !reflect.DeepEqual(decodedString, testCase.output) { - t.Fatalf("Test %d: generated Map not matching, expected = `%s`, found = `%s`", idx+1, testCase.input, errDecode) - } - if errDecode.Cause.Error() != testCase.err.Error() { - t.Fatalf("Test %d: generated error not matching, expected = `%s`, found = `%s`", idx+1, testCase.err, errDecode) - } - } - } -} diff --git a/cmd/cp-main.go b/cmd/cp-main.go index 5648ed4b0d..354acaea87 100644 --- a/cmd/cp-main.go +++ b/cmd/cp-main.go @@ -18,18 +18,14 @@ package cmd import ( - "bufio" "context" "errors" "fmt" "io" - "os" "path/filepath" "strings" - "github.com/dustin/go-humanize" "github.com/fatih/color" - jsoniter "github.com/json-iterator/go" "github.com/minio/cli" json "github.com/minio/colorjson" "github.com/minio/mc/pkg/probe" @@ -67,10 +63,6 @@ var ( Name: "attr", Usage: "add custom metadata for the object", }, 
- cli.BoolFlag{ - Name: "continue, c", - Usage: "create or resume copy session", - }, cli.BoolFlag{ Name: "preserve, a", Usage: "preserve filesystem attributes (mode, ownership, timestamps)", @@ -122,7 +114,7 @@ var cpCmd = cli.Command{ Action: mainCopy, OnUsageError: onUsageError, Before: setGlobalsFromContext, - Flags: append(append(cpFlags, ioFlags...), globalFlags...), + Flags: append(append(cpFlags, encFlags...), globalFlags...), CustomHelpTemplate: `NAME: {{.HelpName}} - {{.Usage}} @@ -132,9 +124,10 @@ USAGE: FLAGS: {{range .VisibleFlags}}{{.}} {{end}} + ENVIRONMENT VARIABLES: - MC_ENCRYPT: list of comma delimited prefixes - MC_ENCRYPT_KEY: list of comma delimited prefix=secret values + MC_ENC_KMS: KMS encryption key in the form of (alias/prefix=key). + MC_ENC_S3: S3 encryption key in the form of (alias/prefix=key). EXAMPLES: 01. Copy a list of objects from local file system to Amazon S3 cloud storage. @@ -161,12 +154,11 @@ EXAMPLES: 08. Copy a local folder with space separated characters to Amazon S3 cloud storage. {{.Prompt}} {{.HelpName}} --recursive 'workdir/documents/May 2014/' s3/miniocloud - 09. Copy a folder with encrypted objects recursively from Amazon S3 to MinIO cloud storage. - {{.Prompt}} {{.HelpName}} --recursive --encrypt-key "s3/documents/=32byteslongsecretkeymustbegiven1,myminio/documents/=32byteslongsecretkeymustbegiven2" s3/documents/ myminio/documents/ + 09. Copy a folder with encrypted objects recursively from Amazon S3 to MinIO cloud storage using s3 encryption. + {{.Prompt}} {{.HelpName}} --recursive --enc-s3 "s3/documents/=my-aws-key" --enc-s3 "myminio/documents/=my-minio-key" s3/documents/ myminio/documents/ - 10. Copy a folder with encrypted objects recursively from Amazon S3 to MinIO cloud storage. In case the encryption key contains non-printable character like tab, pass the - base64 encoded string as key. - {{.Prompt}} {{.HelpName}} --recursive --encrypt-key "s3/documents/=MzJieXRlc2xvbmdzZWNyZWFiY2RlZmcJZ2l2ZW5uMjE=,myminio/documents/=MzJieXRlc2xvbmdzZWNyZWFiY2RlZmcJZ2l2ZW5uMjE=" s3/documents/ myminio/documents/ + 10. Copy a folder with encrypted objects recursively from Amazon S3 to MinIO cloud storage. + {{.Prompt}} {{.HelpName}} --recursive --enc-c "s3/documents/=MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDA" --enc-c "myminio/documents/=MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5BBB" s3/documents/ myminio/documents/ 11. Copy a list of objects from local file system to MinIO cloud storage with specified metadata, separated by ";" {{.Prompt}} {{.HelpName}} --attr "key1=value1;key2=value2" Music/*.mp4 play/mybucket/ @@ -177,25 +169,22 @@ EXAMPLES: 13. Copy a text file to an object storage and assign REDUCED_REDUNDANCY storage-class to the uploaded object. {{.Prompt}} {{.HelpName}} --storage-class REDUCED_REDUNDANCY myobject.txt play/mybucket - 14. Copy a text file to an object storage and create or resume copy session. - {{.Prompt}} {{.HelpName}} --recursive --continue dir/ play/mybucket - - 15. Copy a text file to an object storage and preserve the file system attribute as metadata. + 14. Copy a text file to an object storage and preserve the file system attribute as metadata. {{.Prompt}} {{.HelpName}} -a myobject.txt play/mybucket - 16. Copy a text file to an object storage with object lock mode set to 'GOVERNANCE' with retention duration 1 day. + 15. Copy a text file to an object storage with object lock mode set to 'GOVERNANCE' with retention duration 1 day. 
{{.Prompt}} {{.HelpName}} --retention-mode governance --retention-duration 1d locked.txt play/locked-bucket/ - 17. Copy a text file to an object storage with legal-hold enabled. + 16. Copy a text file to an object storage with legal-hold enabled. {{.Prompt}} {{.HelpName}} --legal-hold on locked.txt play/locked-bucket/ - 18. Copy a text file to an object storage and disable multipart upload feature. + 17. Copy a text file to an object storage and disable multipart upload feature. {{.Prompt}} {{.HelpName}} --disable-multipart myobject.txt play/mybucket - 19. Roll back 10 days in the past to copy the content of 'mybucket' + 18. Roll back 10 days in the past to copy the content of 'mybucket' {{.Prompt}} {{.HelpName}} --rewind 10d -r play/mybucket/ /tmp/dest/ - 20. Set tags to the uploaded objects + 19. Set tags to the uploaded objects {{.Prompt}} {{.HelpName}} -r --tags "category=prod&type=backup" ./data/ play/another-bucket/ `, @@ -269,7 +258,7 @@ func doCopy(ctx context.Context, copyOpts doCopyOpts) URLs { urls := uploadSourceToTargetURL(ctx, uploadSourceToTargetURLOpts{ urls: copyOpts.cpURLs, progress: copyOpts.pg, - encKeyDB: copyOpts.encKeyDB, + encKeyDB: copyOpts.encryptionKeys, preserve: copyOpts.preserve, isZip: copyOpts.isZip, multipartSize: copyOpts.multipartSize, @@ -292,90 +281,6 @@ func doCopyFake(cpURLs URLs, pg Progress) URLs { return cpURLs } -// doPrepareCopyURLs scans the source URL and prepares a list of objects for copying. -func doPrepareCopyURLs(ctx context.Context, session *sessionV8, cancelCopy context.CancelFunc) (totalBytes, totalObjects int64, errSeen bool) { - // Separate source and target. 'cp' can take only one target, - // but any number of sources. - sourceURLs := session.Header.CommandArgs[:len(session.Header.CommandArgs)-1] - targetURL := session.Header.CommandArgs[len(session.Header.CommandArgs)-1] // Last one is target - - // Access recursive flag inside the session header. - isRecursive := session.Header.CommandBoolFlags["recursive"] - rewind := session.Header.CommandStringFlags["rewind"] - versionID := session.Header.CommandStringFlags["version-id"] - olderThan := session.Header.CommandStringFlags["older-than"] - newerThan := session.Header.CommandStringFlags["newer-than"] - encryptKeys := session.Header.CommandStringFlags["encrypt-key"] - encrypt := session.Header.CommandStringFlags["encrypt"] - encKeyDB, err := parseAndValidateEncryptionKeys(encryptKeys, encrypt) - fatalIf(err, "Unable to parse encryption keys.") - - // Create a session data file to store the processed URLs. - dataFP := session.NewDataWriter() - - var scanBar scanBarFunc - if !globalQuiet && !globalJSON { // set up progress bar - scanBar = scanBarFactory() - } - - opts := prepareCopyURLsOpts{ - sourceURLs: sourceURLs, - targetURL: targetURL, - isRecursive: isRecursive, - encKeyDB: encKeyDB, - olderThan: olderThan, - newerThan: newerThan, - timeRef: parseRewindFlag(rewind), - versionID: versionID, - } - - URLsCh := prepareCopyURLs(ctx, opts) - done := false - for !done { - select { - case cpURLs, ok := <-URLsCh: - if !ok { // Done with URL preparation - done = true - break - } - - if cpURLs.Error != nil { - printCopyURLsError(&cpURLs) - errSeen = true - break - } - - jsoniter := jsoniter.ConfigCompatibleWithStandardLibrary - jsonData, e := jsoniter.Marshal(cpURLs) - if e != nil { - session.Delete() - fatalIf(probe.NewError(e), "Unable to prepare URL for copying. 
Error in JSON marshaling.") - } - dataFP.Write(jsonData) - dataFP.Write([]byte{'\n'}) - if !globalQuiet && !globalJSON { - scanBar(cpURLs.SourceContent.URL.String()) - } - - totalBytes += cpURLs.SourceContent.Size - totalObjects++ - case <-globalContext.Done(): - cancelCopy() - // Print in new line and adjust to top so that we don't print over the ongoing scan bar - if !globalQuiet && !globalJSON { - console.Eraseline() - } - session.Delete() // If we are interrupted during the URL scanning, we drop the session. - os.Exit(0) - } - } - - session.Header.TotalBytes = totalBytes - session.Header.TotalObjects = totalObjects - session.Save() - return -} - func printCopyURLsError(cpURLs *URLs) { // Print in new line and adjust to top so that we // don't print over the ongoing scan bar @@ -393,7 +298,7 @@ func printCopyURLsError(cpURLs *URLs) { } } -func doCopySession(ctx context.Context, cancelCopy context.CancelFunc, cli *cli.Context, session *sessionV8, encKeyDB map[string][]prefixSSEPair, isMvCmd bool) error { +func doCopySession(ctx context.Context, cancelCopy context.CancelFunc, cli *cli.Context, encryptionKeys map[string][]prefixSSEPair, isMvCmd bool) error { var isCopied func(string) bool var totalObjects, totalBytes int64 @@ -416,80 +321,43 @@ func doCopySession(ctx context.Context, cancelCopy context.CancelFunc, cli *cli. // Check if the target path has object locking enabled withLock, _ := isBucketLockEnabled(ctx, targetURL) - if session != nil { - // isCopied returns true if an object has been already copied - // or not. This is useful when we resume from a session. - isCopied = isLastFactory(session.Header.LastCopied) + isRecursive := cli.Bool("recursive") + olderThan := cli.String("older-than") + newerThan := cli.String("newer-than") + rewind := cli.String("rewind") + versionID := cli.String("version-id") - if !session.HasData() { - totalBytes, totalObjects, errSeen = doPrepareCopyURLs(ctx, session, cancelCopy) - } else { - totalBytes, totalObjects = session.Header.TotalBytes, session.Header.TotalObjects + go func() { + totalBytes := int64(0) + opts := prepareCopyURLsOpts{ + sourceURLs: sourceURLs, + targetURL: targetURL, + isRecursive: isRecursive, + encKeyDB: encryptionKeys, + olderThan: olderThan, + newerThan: newerThan, + timeRef: parseRewindFlag(rewind), + versionID: versionID, + isZip: cli.Bool("zip"), } - pg.SetTotal(totalBytes) - - go func() { - jsoniter := jsoniter.ConfigCompatibleWithStandardLibrary - // Prepare URL scanner from session data file. - urlScanner := bufio.NewScanner(session.NewDataReader()) - for { - if !urlScanner.Scan() || urlScanner.Err() != nil { - close(cpURLsCh) - break - } - - var cpURLs URLs - if e := jsoniter.Unmarshal([]byte(urlScanner.Text()), &cpURLs); e != nil { - errorIf(probe.NewError(e), "Unable to unmarshal %s", urlScanner.Text()) - continue - } - - cpURLsCh <- cpURLs - } - }() - - } else { - // Access recursive flag inside the session header. 
- isRecursive := cli.Bool("recursive") - olderThan := cli.String("older-than") - newerThan := cli.String("newer-than") - rewind := cli.String("rewind") - versionID := cli.String("version-id") - - go func() { - totalBytes := int64(0) - opts := prepareCopyURLsOpts{ - sourceURLs: sourceURLs, - targetURL: targetURL, - isRecursive: isRecursive, - encKeyDB: encKeyDB, - olderThan: olderThan, - newerThan: newerThan, - timeRef: parseRewindFlag(rewind), - versionID: versionID, - isZip: cli.Bool("zip"), + for cpURLs := range prepareCopyURLs(ctx, opts) { + if cpURLs.Error != nil { + errSeen = true + printCopyURLsError(&cpURLs) + break } - for cpURLs := range prepareCopyURLs(ctx, opts) { - if cpURLs.Error != nil { - errSeen = true - printCopyURLsError(&cpURLs) - break - } - - totalBytes += cpURLs.SourceContent.Size - pg.SetTotal(totalBytes) - totalObjects++ - cpURLsCh <- cpURLs - } - close(cpURLsCh) - }() - } + totalBytes += cpURLs.SourceContent.Size + pg.SetTotal(totalBytes) + totalObjects++ + cpURLsCh <- cpURLs + } + close(cpURLsCh) + }() quitCh := make(chan struct{}) statusCh := make(chan URLs) - parallel := newParallelManager(statusCh) go func() { @@ -498,7 +366,6 @@ func doCopySession(ctx context.Context, cancelCopy context.CancelFunc, cli *cli. close(statusCh) } - startContinue := true for { select { case <-quitCh: @@ -562,22 +429,14 @@ func doCopySession(ctx context.Context, cancelCopy context.CancelFunc, cli *cli. }, 0) } else { // Print the copy resume summary once in start - if startContinue && cli.Bool("continue") { - if pb, ok := pg.(*progressBar); ok { - startSize := humanize.IBytes(uint64(pb.Start().Get())) - totalSize := humanize.IBytes(uint64(pb.Total)) - console.Println("Resuming copy from ", startSize, " / ", totalSize) - } - startContinue = false - } parallel.queueTask(func() URLs { return doCopy(ctx, doCopyOpts{ - cpURLs: cpURLs, - pg: pg, - encKeyDB: encKeyDB, - isMvCmd: isMvCmd, - preserve: preserve, - isZip: isZip, + cpURLs: cpURLs, + pg: pg, + encryptionKeys: encryptionKeys, + isMvCmd: isMvCmd, + preserve: preserve, + isZip: isZip, }) }, cpURLs.SourceContent.Size) } @@ -598,9 +457,6 @@ loop: if !globalQuiet && !globalJSON { console.Eraseline() } - if session != nil { - session.CloseAndDie() - } break loop case cpURLs, ok := <-statusCh: // Status channel is closed, we should return. @@ -608,10 +464,6 @@ loop: break loop } if cpURLs.Error == nil { - if session != nil { - session.Header.LastCopied = cpURLs.SourceContent.URL.String() - session.Save() - } cpAllFilesErr = false } else { @@ -643,12 +495,6 @@ loop: } } - if session != nil { - // For critical errors we should exit. Session - // can be resumed after the user figures out - // the problem. - session.copyCloseAndDie(session.Header.CommandBoolFlags["session"]) - } } } } @@ -688,97 +534,25 @@ func mainCopy(cliCtx *cli.Context) error { ctx, cancelCopy := context.WithCancel(globalContext) defer cancelCopy() - // Parse encryption keys per command. - encKeyDB, err := getEncKeys(cliCtx) - fatalIf(err, "Unable to parse encryption keys.") - - // Parse metadata. - userMetaMap := make(map[string]string) - if cliCtx.String("attr") != "" { - userMetaMap, err = getMetaDataEntry(cliCtx.String("attr")) - fatalIf(err, "Unable to parse attribute %v", cliCtx.String("attr")) - } - - // check 'copy' cli arguments. checkCopySyntax(cliCtx) - // Additional command specific theme customization. 
console.SetColor("Copy", color.New(color.FgGreen, color.Bold)) - recursive := cliCtx.Bool("recursive") - rewind := cliCtx.String("rewind") - versionID := cliCtx.String("version-id") - olderThan := cliCtx.String("older-than") - newerThan := cliCtx.String("newer-than") - storageClass := cliCtx.String("storage-class") - retentionMode := cliCtx.String(rmFlag) - retentionDuration := cliCtx.String(rdFlag) - legalHold := strings.ToUpper(cliCtx.String(lhFlag)) - tags := cliCtx.String("tags") - sseKeys := os.Getenv("MC_ENCRYPT_KEY") - if key := cliCtx.String("encrypt-key"); key != "" { - sseKeys = key - } - - if sseKeys != "" { - sseKeys, err = getDecodedKey(sseKeys) - fatalIf(err, "Unable to parse encryption keys.") - } - sse := cliCtx.String("encrypt") - - var session *sessionV8 - - if cliCtx.Bool("continue") { - sessionID := getHash("cp", os.Args[1:]) - if isSessionExists(sessionID) { - session, err = loadSessionV8(sessionID) - fatalIf(err.Trace(sessionID), "Unable to load session.") - } else { - session = newSessionV8(sessionID) - session.Header.CommandType = "cp" - session.Header.CommandBoolFlags["recursive"] = recursive - session.Header.CommandStringFlags["rewind"] = rewind - session.Header.CommandStringFlags["version-id"] = versionID - session.Header.CommandStringFlags["older-than"] = olderThan - session.Header.CommandStringFlags["newer-than"] = newerThan - session.Header.CommandStringFlags["storage-class"] = storageClass - session.Header.CommandStringFlags["tags"] = tags - session.Header.CommandStringFlags[rmFlag] = retentionMode - session.Header.CommandStringFlags[rdFlag] = retentionDuration - session.Header.CommandStringFlags[lhFlag] = legalHold - session.Header.CommandStringFlags["encrypt-key"] = sseKeys - session.Header.CommandStringFlags["encrypt"] = sse - session.Header.CommandBoolFlags["session"] = cliCtx.Bool("continue") - - if cliCtx.Bool("preserve") { - session.Header.CommandBoolFlags["preserve"] = cliCtx.Bool("preserve") - } - session.Header.UserMetaData = userMetaMap - session.Header.CommandBoolFlags["md5"] = cliCtx.Bool("md5") - session.Header.CommandBoolFlags["disable-multipart"] = cliCtx.Bool("disable-multipart") - - var e error - if session.Header.RootPath, e = os.Getwd(); e != nil { - session.Delete() - fatalIf(probe.NewError(e), "Unable to get current working folder.") - } + var err *probe.Error - // extract URLs. - session.Header.CommandArgs = cliCtx.Args() - } - } - - e := doCopySession(ctx, cancelCopy, cliCtx, session, encKeyDB, false) - if session != nil { - session.Delete() + // Parse encryption keys per command. + encryptionKeyMap, err := validateAndCreateEncryptionKeys(cliCtx) + if err != nil { + err.Trace(cliCtx.Args()...) } + fatalIf(err, "SSE Error") - return e + return doCopySession(ctx, cancelCopy, cliCtx, encryptionKeyMap, false) } type doCopyOpts struct { cpURLs URLs pg ProgressReader - encKeyDB map[string][]prefixSSEPair + encryptionKeys map[string][]prefixSSEPair isMvCmd, preserve, isZip bool updateProgressTotal bool multipartSize string diff --git a/cmd/diff-main.go b/cmd/diff-main.go index dd4e3f60e3..2ed2b7ca76 100644 --- a/cmd/diff-main.go +++ b/cmd/diff-main.go @@ -202,7 +202,7 @@ func mainDiff(cliCtx *cli.Context) error { defer cancelDiff() // Parse encryption keys per command. - encKeyDB, err := getEncKeys(cliCtx) + encKeyDB, err := validateAndCreateEncryptionKeys(cliCtx) fatalIf(err, "Unable to parse encryption keys.") // check 'diff' cli arguments. 
diff --git a/cmd/du-main.go b/cmd/du-main.go index d6e689254c..9c3ef698dc 100644 --- a/cmd/du-main.go +++ b/cmd/du-main.go @@ -62,7 +62,7 @@ var duCmd = cli.Command{ Action: mainDu, OnUsageError: onUsageError, Before: setGlobalsFromContext, - Flags: append(append(duFlags, ioFlags...), globalFlags...), + Flags: append(duFlags, globalFlags...), CustomHelpTemplate: `NAME: {{.HelpName}} - {{.Usage}} @@ -72,8 +72,6 @@ USAGE: FLAGS: {{range .VisibleFlags}}{{.}} {{end}} -ENVIRONMENT VARIABLES: - MC_ENCRYPT_KEY: list of comma delimited prefix=secret values EXAMPLES: 1. Summarize disk usage of 'jazz-songs' bucket recursively. @@ -121,7 +119,7 @@ func (r duMessage) JSON() string { return string(msgBytes) } -func du(ctx context.Context, urlStr string, timeRef time.Time, withVersions bool, depth int, encKeyDB map[string][]prefixSSEPair) (sz, objs int64, err error) { +func du(ctx context.Context, urlStr string, timeRef time.Time, withVersions bool, depth int) (sz, objs int64, err error) { targetAlias, targetURL, _ := mustExpandAlias(urlStr) if !strings.HasSuffix(targetURL, "/") { @@ -176,7 +174,7 @@ func du(ctx context.Context, urlStr string, timeRef time.Time, withVersions bool if targetAlias != "" { subDirAlias = targetAlias + "/" + content.URL.Path } - used, n, err := du(ctx, subDirAlias, timeRef, withVersions, depth, encKeyDB) + used, n, err := du(ctx, subDirAlias, timeRef, withVersions, depth) if err != nil { return 0, 0, err } @@ -223,10 +221,6 @@ func mainDu(cliCtx *cli.Context) error { ctx, cancelRm := context.WithCancel(globalContext) defer cancelRm() - // Parse encryption keys per command. - encKeyDB, err := getEncKeys(cliCtx) - fatalIf(err, "Unable to parse encryption keys.") - // du specific flags. depth := cliCtx.Int("depth") if depth == 0 { @@ -250,7 +244,7 @@ func mainDu(cliCtx *cli.Context) error { fatalIf(errInvalidArgument().Trace(urlStr), fmt.Sprintf("Source `%s` is not a folder. Only folders are supported by 'du' command.", urlStr)) } - if _, _, err := du(ctx, urlStr, timeRef, withVersions, depth, encKeyDB); duErr == nil { + if _, _, err := du(ctx, urlStr, timeRef, withVersions, depth); duErr == nil { duErr = err } } diff --git a/cmd/encryption-methods.go b/cmd/encryption-methods.go new file mode 100644 index 0000000000..50f43e212c --- /dev/null +++ b/cmd/encryption-methods.go @@ -0,0 +1,252 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// # This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package cmd + +import ( + "encoding/base64" + "sort" + "strconv" + "strings" + + "github.com/minio/cli" + "github.com/minio/mc/pkg/probe" + "github.com/minio/minio-go/v7/pkg/encrypt" +) + +type sseKeyType int + +const ( + sseNone sseKeyType = iota + sseC + sseKMS + sseS3 +) + +// struct representing object prefix and sse keys association. 
+type prefixSSEPair struct { + Prefix string + SSE encrypt.ServerSide +} + +// byPrefixLength implements sort.Interface. +type byPrefixLength []prefixSSEPair + +func (p byPrefixLength) Len() int { return len(p) } +func (p byPrefixLength) Less(i, j int) bool { + if len(p[i].Prefix) != len(p[j].Prefix) { + return len(p[i].Prefix) > len(p[j].Prefix) + } + return p[i].Prefix < p[j].Prefix +} + +func (p byPrefixLength) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// get SSE Key if object prefix matches with given resource. +func getSSE(resource string, encKeys []prefixSSEPair) encrypt.ServerSide { + for _, k := range encKeys { + if strings.HasPrefix(resource, k.Prefix) { + return k.SSE + } + } + return nil +} + +func validateAndCreateEncryptionKeys(ctx *cli.Context) (encMap map[string][]prefixSSEPair, err *probe.Error) { + encMap = make(map[string][]prefixSSEPair, 0) + + for _, v := range ctx.StringSlice("enc-kms") { + prefixPair, alias, err := validateAndParseKey(ctx, v, sseKMS) + if err != nil { + return nil, err + } + encMap[alias] = append(encMap[alias], *prefixPair) + } + + for _, v := range ctx.StringSlice("enc-s3") { + prefixPair, alias, err := validateAndParseKey(ctx, v, sseS3) + if err != nil { + return nil, err + } + encMap[alias] = append(encMap[alias], *prefixPair) + } + + for _, v := range ctx.StringSlice("enc-c") { + prefixPair, alias, err := validateAndParseKey(ctx, v, sseC) + if err != nil { + return nil, err + } + encMap[alias] = append(encMap[alias], *prefixPair) + } + + for i := range encMap { + err = validateOverLappingSSEKeys(encMap[i]) + if err != nil { + return nil, err + } + } + + for alias, ps := range encMap { + if hostCfg := mustGetHostConfig(alias); hostCfg == nil { + for _, p := range ps { + return nil, errSSEInvalidAlias(p.Prefix) + } + } + } + + for _, encKeys := range encMap { + sort.Sort(byPrefixLength(encKeys)) + } + + return encMap, nil +} + +func validateAndParseKey(ctx *cli.Context, key string, keyType sseKeyType) (SSEPair *prefixSSEPair, alias string, perr *probe.Error) { + matchedCount := 0 + alias, prefix, encKey, keyErr := parseSSEKey(key, keyType) + if keyErr != nil { + return nil, "", keyErr + } + if alias == "" { + return nil, "", errSSEInvalidAlias(prefix).Trace(key) + } + + if (keyType == sseKMS || keyType == sseC) && encKey == "" { + return nil, "", errSSEClientKeyFormat("SSE-C/KMS key should be of the form alias/prefix=key,... 
").Trace(key) + } + + for _, arg := range ctx.Args() { + if strings.HasPrefix(arg, alias+"/"+prefix) { + matchedCount++ + } + } + + if matchedCount == 0 { + return nil, "", errSSEPrefixMatch() + } + + ssePairPrefix := alias + "/" + prefix + var sse encrypt.ServerSide + var err error + + switch keyType { + case sseC: + sse, err = encrypt.NewSSEC([]byte(encKey)) + case sseKMS: + sse, err = encrypt.NewSSEKMS(encKey, nil) + case sseS3: + sse = encrypt.NewSSE() + } + + if err != nil { + return nil, "", probe.NewError(err).Trace(key) + } + + return &prefixSSEPair{ + Prefix: ssePairPrefix, + SSE: sse, + }, alias, nil +} + +func validateOverLappingSSEKeys(keyMap []prefixSSEPair) (err *probe.Error) { + for i := 0; i < len(keyMap); i++ { + for j := i + 1; j < len(keyMap); j++ { + if strings.HasPrefix(keyMap[i].Prefix, keyMap[j].Prefix) || + strings.HasPrefix(keyMap[j].Prefix, keyMap[i].Prefix) { + return errSSEOverlappingAlias(keyMap[i].Prefix, keyMap[j].Prefix) + } + } + } + return +} + +func splitKey(sseKey string) (alias, prefix string) { + x := strings.SplitN(sseKey, "/", 2) + switch len(x) { + case 2: + return x[0], x[1] + case 1: + return x[0], "" + } + return "", "" +} + +func parseSSEKey(sseKey string, keyType sseKeyType) ( + alias string, + prefix string, + key string, + err *probe.Error, +) { + if keyType == sseS3 { + alias, prefix = splitKey(sseKey) + return + } + + var path string + alias, path = splitKey(sseKey) + splitPath := strings.Split(path, "=") + if len(splitPath) == 0 { + err = errSSEKeyMissing().Trace(sseKey) + return + } + + aliasPlusPrefix := strings.Join(splitPath[:len(splitPath)-1], "=") + prefix = strings.Replace(aliasPlusPrefix, alias+"/", "", 1) + key = splitPath[len(splitPath)-1] + + if keyType == sseC { + keyB, de := base64.RawStdEncoding.DecodeString(key) + if de != nil { + err = errSSEClientKeyFormat("One of the inserted keys was " + strconv.Itoa(len(key)) + " bytes and did not have valid base64 raw encoding.").Trace(sseKey) + return + } + key = string(keyB) + if len(key) != 32 { + err = errSSEClientKeyFormat("The plain text key was " + strconv.Itoa(len(key)) + " bytes but should be 32 bytes long").Trace(sseKey) + return + } + } + + if keyType == sseKMS { + if !validKMSKeyName(key) { + err = errSSEKMSKeyFormat("One of the inserted keys was " + strconv.Itoa(len(key)) + " bytes and did not have a valid KMS key name.").Trace(sseKey) + return + } + } + + return +} + +func validKMSKeyName(s string) bool { + if s == "" || s == "_" { + return false + } + + n := len(s) - 1 + for i, r := range s { + switch { + case r >= '0' && r <= '9': + case r >= 'A' && r <= 'Z': + case r >= 'a' && r <= 'z': + case r == '-' && i > 0 && i < n: + case r == '_': + default: + return false + } + } + return true +} diff --git a/cmd/encryption-methods_test.go b/cmd/encryption-methods_test.go new file mode 100644 index 0000000000..fe64b82a67 --- /dev/null +++ b/cmd/encryption-methods_test.go @@ -0,0 +1,134 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// # This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package cmd + +import ( + "fmt" + "testing" +) + +func TestParseEncryptionKeys(t *testing.T) { + baseAlias := "mintest" + basePrefix := "two/layer/prefix" + baseObject := "object_name" + sseKey := "MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDA" + sseKeyPlain := "01234567890123456789012345678900" + + // INVALID KEYS + sseKeyInvalidShort := "MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2" + sseKeyInvalidSymbols := "MDEyMzQ1Njc4O____jM0N!!!ODkwMTIzNDU2Nzg5MDA" + sseKeyInvalidSpaces := "MDE yMzQ1Njc4OTAxM jM0NTY3ODkwMTIzNDU2Nzg5MDA" + sseKeyInvalidPrefixSpace := " MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDA" + sseKeyInvalidOneShort := "MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MD" + + testCases := []struct { + encryptionKey string + keyPlain string + alias string + prefix string + object string + sseType sseKeyType + success bool + }{ + { + encryptionKey: fmt.Sprintf("%s/%s/%s=%s", baseAlias, basePrefix, baseObject, sseKey), + keyPlain: sseKeyPlain, + alias: baseAlias, + prefix: basePrefix, + object: baseObject, + success: true, + }, + { + encryptionKey: fmt.Sprintf("%s=/%s=/%s==%s", baseAlias, basePrefix, baseObject, sseKey), + keyPlain: sseKeyPlain, + alias: baseAlias + "=", + prefix: basePrefix + "=", + object: baseObject + "=", + success: true, + }, + { + encryptionKey: fmt.Sprintf("%s//%s//%s/=%s", baseAlias, basePrefix, baseObject, sseKey), + keyPlain: sseKeyPlain, + alias: baseAlias + "/", + prefix: basePrefix + "/", + object: baseObject + "/", + success: true, + }, + { + encryptionKey: fmt.Sprintf("%s/%s/%s==%s", baseAlias, basePrefix, baseObject, sseKey), + keyPlain: sseKeyPlain, + alias: baseAlias, + prefix: basePrefix, + object: baseObject + "=", + success: true, + }, + { + encryptionKey: fmt.Sprintf("%s/%s/%s!@_==_$^&*=%s", baseAlias, basePrefix, baseObject, sseKey), + keyPlain: sseKeyPlain, + alias: baseAlias, + prefix: basePrefix, + object: baseObject + "!@_==_$^&*", + success: true, + }, + { + encryptionKey: fmt.Sprintf("%s/%s/%s=%sXXXXX", baseAlias, basePrefix, baseObject, sseKey), + success: false, + }, + { + encryptionKey: fmt.Sprintf("%s/%s/%s=%s", baseAlias, basePrefix, baseObject, sseKeyInvalidShort), + success: false, + }, + { + encryptionKey: fmt.Sprintf("%s/%s/%s=%s", baseAlias, basePrefix, baseObject, sseKeyInvalidSymbols), + success: false, + }, + { + encryptionKey: fmt.Sprintf("%s/%s/%s=%s", baseAlias, basePrefix, baseObject, sseKeyInvalidSpaces), + success: false, + }, + { + encryptionKey: fmt.Sprintf("%s/%s/%s=%s", baseAlias, basePrefix, baseObject, sseKeyInvalidPrefixSpace), + success: false, + }, + { + encryptionKey: fmt.Sprintf("%s/%s/%s==%s", baseAlias, basePrefix, baseObject, sseKeyInvalidOneShort), + success: false, + }, + } + + for i, tc := range testCases { + alias, prefix, key, err := parseSSEKey(tc.encryptionKey, sseC) + if tc.success { + if err != nil { + t.Fatalf("Test %d: Expected success, got %s", i+1, err) + } + if fmt.Sprintf("%s/%s", alias, prefix) != fmt.Sprintf("%s/%s/%s", tc.alias, tc.prefix, tc.object) { + t.Fatalf("Test %d: alias and prefix parsing was invalid, expected %s/%s/%s, got %s/%s", i, tc.alias, tc.prefix, tc.object, alias, prefix) + } + if key != tc.keyPlain { + t.Fatalf("Test %d: sse key parsing is invalid, expected %s, got %s", i, tc.keyPlain, key) + } + } + + if !tc.success { + if err == nil { + t.Fatalf("Test %d: Expected error, got success", i+1) + } + } 
+ } +} diff --git a/cmd/error.go b/cmd/error.go index 33663a69b4..021fa5e71d 100644 --- a/cmd/error.go +++ b/cmd/error.go @@ -178,3 +178,20 @@ func deprecatedError(newCommandName string) { err := probe.NewError(fmt.Errorf("Please use '%s' instead", newCommandName)) fatal(err, "Deprecated command") } + +// deprecatedError function for deprecated flags +func deprecatedFlagError(oldFlag, newFlag string) { + err := probe.NewError(fmt.Errorf("'%s' has been deprecated, please use %s instead", oldFlag, newFlag)) + fatal(err, "a deprecated Flag") +} + +func deprecatedFlagsWarning(cliCtx *cli.Context) { + for _, v := range cliCtx.Args() { + switch v { + case "--encrypt", "-encrypt": + deprecatedFlagError("--encrypt", "--enc-s3 or --enc-kms") + case "--encrypt-key", "-encrypt-key": + deprecatedFlagError("--encrypt-key", "--enc-c") + } + } +} diff --git a/cmd/find-main.go b/cmd/find-main.go index 234b9e1d44..4788611995 100644 --- a/cmd/find-main.go +++ b/cmd/find-main.go @@ -240,7 +240,7 @@ func mainFind(cliCtx *cli.Context) error { console.SetColor("FindExecErr", color.New(color.FgRed, color.Italic, color.Bold)) // Parse encryption keys per command. - encKeyDB, err := getEncKeys(cliCtx) + encKeyDB, err := validateAndCreateEncryptionKeys(cliCtx) fatalIf(err, "Unable to parse encryption keys.") checkFindSyntax(ctx, cliCtx, encKeyDB) diff --git a/cmd/flags.go b/cmd/flags.go index 092e7d856d..dc106019c1 100644 --- a/cmd/flags.go +++ b/cmd/flags.go @@ -88,16 +88,26 @@ var globalFlags = []cli.Flag{ }, } -// Flags common across all I/O commands such as cp, mirror, stat, pipe etc. -var ioFlags = []cli.Flag{ - cli.StringFlag{ - Name: "encrypt-key", - Usage: "encrypt/decrypt objects (using server-side encryption with customer provided keys)", - EnvVar: envPrefix + "ENCRYPT_KEY", - }, - cli.StringFlag{ - Name: "encrypt", - Usage: "encrypt/decrypt objects (using server-side encryption with server managed keys)", - EnvVar: envPrefix + "ENCRYPT", - }, +// bundled encryption flags +var encFlags = []cli.Flag{ + encCFlag, + encKSMFlag, + encS3Flag, +} + +var encCFlag = cli.StringSliceFlag{ + Name: "enc-c", + Usage: "encrypt/decrypt objects using client provided keys. (multiple keys can be provided) Format: Raw base64 encoding.", +} + +var encKSMFlag = cli.StringSliceFlag{ + Name: "enc-kms", + Usage: "encrypt/decrypt objects using specific server-side encryption keys. (multiple keys can be provided)", + EnvVar: envPrefix + "ENC_KMS", +} + +var encS3Flag = cli.StringSliceFlag{ + Name: "enc-s3", + Usage: "encrypt/decrypt objects using server-side default keys and configurations. (multiple keys can be provided).", + EnvVar: envPrefix + "ENC_S3", } diff --git a/cmd/get-main.go b/cmd/get-main.go index da3ffae463..f78a0a51aa 100644 --- a/cmd/get-main.go +++ b/cmd/get-main.go @@ -37,7 +37,7 @@ var getCmd = cli.Command{ Action: mainGet, OnUsageError: onUsageError, Before: setGlobalsFromContext, - Flags: append(append(ioFlags, globalFlags...), getFlags...), + Flags: append(append(globalFlags, encCFlag), getFlags...), CustomHelpTemplate: `NAME: {{.HelpName}} - {{.Usage}} @@ -47,29 +47,32 @@ USAGE: FLAGS: {{range .VisibleFlags}}{{.}} {{end}} -ENVIRONMENT VARIABLES: - MC_ENCRYPT: list of comma delimited prefixes - MC_ENCRYPT_KEY: list of comma delimited prefix=secret values EXAMPLES: - 1. Get an object from S3 storage to local file system - {{.Prompt}} {{.HelpName}} ALIAS/BUCKET/object path-to/object + 1. Get an object from MinIO storage to local file system + {{.Prompt}} {{.HelpName}} play/mybucket/object path-to/object + + 2. 
Get an object from MinIO storage using encryption + {{.Prompt}} {{.HelpName}} --enc-c "play/mybucket/object=MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDA" play/mybucket/object path-to/object `, } // mainGet is the entry point for get command. func mainGet(cliCtx *cli.Context) (e error) { - ctx, cancelGet := context.WithCancel(globalContext) - defer cancelGet() - - encKeyDB, err := getEncKeys(cliCtx) - fatalIf(err, "Unable to parse encryption keys.") - args := cliCtx.Args() if len(args) != 2 { showCommandHelpAndExit(cliCtx, 1) // last argument is exit code. } + ctx, cancelGet := context.WithCancel(globalContext) + defer cancelGet() + + encryptionKeys, err := validateAndCreateEncryptionKeys(cliCtx) + if err != nil { + err.Trace(cliCtx.Args()...) + } + fatalIf(err, "unable to parse encryption keys") + // get source and target sourceURLs := args[:len(args)-1] targetURL := args[len(args)-1] @@ -89,7 +92,7 @@ func mainGet(cliCtx *cli.Context) (e error) { opts := prepareCopyURLsOpts{ sourceURLs: sourceURLs, targetURL: targetURL, - encKeyDB: encKeyDB, + encKeyDB: encryptionKeys, ignoreBucketExistsCheck: true, } @@ -121,7 +124,7 @@ func mainGet(cliCtx *cli.Context) (e error) { urls := doCopy(ctx, doCopyOpts{ cpURLs: getURLs, pg: pg, - encKeyDB: encKeyDB, + encryptionKeys: encryptionKeys, updateProgressTotal: true, }) if urls.Error != nil { diff --git a/cmd/head-main.go b/cmd/head-main.go index 5a20434b35..8e1b2aad3a 100644 --- a/cmd/head-main.go +++ b/cmd/head-main.go @@ -59,7 +59,7 @@ var headCmd = cli.Command{ Action: mainHead, OnUsageError: onUsageError, Before: setGlobalsFromContext, - Flags: append(append(headFlags, ioFlags...), globalFlags...), + Flags: append(append(headFlags, encCFlag), globalFlags...), CustomHelpTemplate: `NAME: {{.HelpName}} - {{.Usage}} @@ -69,8 +69,6 @@ USAGE: FLAGS: {{range .VisibleFlags}}{{.}} {{end}} -ENVIRONMENT VARIABLES: - MC_ENCRYPT_KEY: list of comma delimited prefix=secret values NOTE: '{{.HelpName}}' automatically decompresses 'gzip', 'bzip2' compressed objects. @@ -80,11 +78,10 @@ EXAMPLES: {{.Prompt}} {{.HelpName}} -n 1 s3/csv-data/population.csv.gz 2. Display only first line from server encrypted object on Amazon S3. - {{.Prompt}} {{.HelpName}} -n 1 --encrypt-key 's3/csv-data=32byteslongsecretkeymustbegiven1' s3/csv-data/population.csv + {{.Prompt}} {{.HelpName}} -n 1 --enc-c 's3/csv-data=MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDA' s3/csv-data/population.csv - 3. Display only first line from server encrypted object on Amazon S3. In case the encryption key contains non-printable character like tab, pass the - base64 encoded string as key. - {{.Prompt}} {{.HelpName}} --encrypt-key "s3/json-data=MzJieXRlc2xvbmdzZWNyZXRrZQltdXN0YmVnaXZlbjE=" s3/json-data/population.json + 3. Display only first line from server encrypted object on Amazon S3. + {{.Prompt}} {{.HelpName}} --enc-c "s3/json-data=MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDA" s3/json-data/population.json 4. Display the first lines of a specific object version. {{.Prompt}} {{.HelpName}} --version-id "3ddac055-89a7-40fa-8cd3-530a5581b6b8" s3/json-data/population.json @@ -188,7 +185,7 @@ func parseHeadSyntax(ctx *cli.Context) (args []string, versionID string, timeRef // mainHead is the main entry point for head command. func mainHead(ctx *cli.Context) error { // Parse encryption keys per command. 
- encKeyDB, err := getEncKeys(ctx) + encryptionKeys, err := validateAndCreateEncryptionKeys(ctx) fatalIf(err, "Unable to parse encryption keys.") args, versionID, timeRef := parseHeadSyntax(ctx) @@ -203,7 +200,15 @@ func mainHead(ctx *cli.Context) error { // Convert arguments to URLs: expand alias, fix format. for _, url := range ctx.Args() { - fatalIf(headURL(url, versionID, timeRef, encKeyDB, ctx.Int64("lines"), ctx.Bool("zip")).Trace(url), "Unable to read from `"+url+"`.") + err = headURL( + url, + versionID, + timeRef, + encryptionKeys, + ctx.Int64("lines"), + ctx.Bool("zip"), + ) + fatalIf(err.Trace(url), "Unable to read from `"+url+"`.") } return nil diff --git a/cmd/ilm-restore.go b/cmd/ilm-restore.go index c997cc3eb1..af398f467b 100644 --- a/cmd/ilm-restore.go +++ b/cmd/ilm-restore.go @@ -57,7 +57,7 @@ var ilmRestoreCmd = cli.Command{ Action: mainILMRestore, OnUsageError: onUsageError, Before: setGlobalsFromContext, - Flags: append(append(ilmRestoreFlags, ioFlags...), globalFlags...), + Flags: append(append(ilmRestoreFlags, encCFlag), globalFlags...), CustomHelpTemplate: `NAME: {{.HelpName}} - {{.Usage}} @@ -86,7 +86,7 @@ EXAMPLES: {{.Prompt}} {{.HelpName}} --recursive --versions myminio/mybucket/dir/ 5. Restore an SSE-C encrypted object. - {{.Prompt}} {{.HelpName}} --encrypt-key "myminio/mybucket/=MzJieXRlc2xvbmdzZWNyZWFiY2RlZmcJZ2l2ZW5uMjE=" myminio/mybucket/myobject.txt + {{.Prompt}} {{.HelpName}} --enc-c "myminio/mybucket/=MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDA" myminio/mybucket/myobject.txt `, } @@ -311,7 +311,7 @@ func mainILMRestore(cliCtx *cli.Context) (cErr error) { includeVersions := cliCtx.Bool("versions") days := cliCtx.Int("days") - encKeyDB, err := getEncKeys(cliCtx) + encKeyDB, err := validateAndCreateEncryptionKeys(cliCtx) fatalIf(err, "Unable to parse encryption keys.") targetAlias, targetURL, _ := mustExpandAlias(aliasedURL) diff --git a/cmd/main.go b/cmd/main.go index 6c182c8498..920d2b97a9 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -256,9 +256,6 @@ func migrate() { // Migrate config files if any. migrateConfig() - // Migrate session files if any. - migrateSession() - // Migrate shared urls if any. migrateShare() } @@ -275,11 +272,6 @@ func initMC() { } } - // Check if mc session directory exists. - if !isSessionDirExists() { - fatalIf(createSessionDir().Trace(), "Unable to create session config directory.") - } - // Check if mc share directory exists. if !isShareDirExists() { initShareConfig() @@ -360,6 +352,8 @@ func installAutoCompletion() { } func registerBefore(ctx *cli.Context) error { + deprecatedFlagsWarning(ctx) + if ctx.IsSet("config-dir") { // Set the config directory. setMcConfigDir(ctx.String("config-dir")) diff --git a/cmd/mirror-main.go b/cmd/mirror-main.go index ece6429bf6..42980c6b38 100644 --- a/cmd/mirror-main.go +++ b/cmd/mirror-main.go @@ -151,7 +151,7 @@ var mirrorCmd = cli.Command{ Action: mainMirror, OnUsageError: onUsageError, Before: setGlobalsFromContext, - Flags: append(append(mirrorFlags, ioFlags...), globalFlags...), + Flags: append(append(mirrorFlags, encFlags...), globalFlags...), CustomHelpTemplate: `NAME: {{.HelpName}} - {{.Usage}} @@ -161,9 +161,10 @@ USAGE: FLAGS: {{range .VisibleFlags}}{{.}} {{end}} + ENVIRONMENT VARIABLES: - MC_ENCRYPT: list of comma delimited prefixes - MC_ENCRYPT_KEY: list of comma delimited prefix=secret values + MC_ENC_KMS: KMS encryption key in the form of (alias/prefix=key). + MC_ENC_S3: S3 encryption key in the form of (alias/prefix=key). EXAMPLES: 01. 
Mirror a bucket recursively from MinIO cloud storage to a bucket on Amazon S3 cloud storage. @@ -206,20 +207,16 @@ EXAMPLES: 12. Mirror objects older than 30 days from Amazon S3 bucket test to a local folder. {{.Prompt}} {{.HelpName}} --older-than 30d s3/test ~/test - 13. Mirror server encrypted objects from MinIO cloud storage to a bucket on Amazon S3 cloud storage - {{.Prompt}} {{.HelpName}} --encrypt-key "minio/photos=32byteslongsecretkeymustbegiven1,s3/archive=32byteslongsecretkeymustbegiven2" minio/photos/ s3/archive/ - - 14. Mirror server encrypted objects from MinIO cloud storage to a bucket on Amazon S3 cloud storage. In case the encryption key contains - non-printable character like tab, pass the base64 encoded string as key. - {{.Prompt}} {{.HelpName}} --encrypt-key "s3/photos/=32byteslongsecretkeymustbegiven1,play/archive/=MzJieXRlc2xvbmdzZWNyZXRrZQltdXN0YmVnaXZlbjE=" s3/photos/ play/archive/ + 13. Mirror server encrypted objects from Amazon S3 cloud storage to a bucket on Amazon S3 cloud storage + {{.Prompt}} {{.HelpName}} --enc-c "minio/archive=MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDA" --enc-c "s3/archive=MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5BBB" s3/archive/ minio/archive/ - 15. Update 'Cache-Control' header on all existing objects recursively. + 14. Update 'Cache-Control' header on all existing objects recursively. {{.Prompt}} {{.HelpName}} --attr "Cache-Control=max-age=90000,min-fresh=9000" myminio/video-files myminio/video-files - 16. Mirror a local folder recursively to Amazon S3 cloud storage and preserve all local file attributes. + 15. Mirror a local folder recursively to Amazon S3 cloud storage and preserve all local file attributes. {{.Prompt}} {{.HelpName}} -a backup/ s3/archive - 17. Cross mirror between sites in a active-active deployment. + 16. Cross mirror between sites in a active-active deployment. Site-A: {{.Prompt}} {{.HelpName}} --active-active siteA siteB Site-B: {{.Prompt}} {{.HelpName}} --active-active siteB siteA `, @@ -1103,8 +1100,7 @@ func mainMirror(cliCtx *cli.Context) error { ctx, cancelMirror := context.WithCancel(globalContext) defer cancelMirror() - // Parse encryption keys per command. - encKeyDB, err := getEncKeys(cliCtx) + encKeyDB, err := validateAndCreateEncryptionKeys(cliCtx) fatalIf(err, "Unable to parse encryption keys.") // check 'mirror' cli arguments. diff --git a/cmd/mv-main.go b/cmd/mv-main.go index 3d3047a6f3..d0d9be9509 100644 --- a/cmd/mv-main.go +++ b/cmd/mv-main.go @@ -20,7 +20,6 @@ package cmd import ( "context" "fmt" - "os" "sync" "github.com/fatih/color" @@ -52,10 +51,6 @@ var ( Name: "attr", Usage: "add custom metadata for the object", }, - cli.BoolFlag{ - Name: "continue, c", - Usage: "create or resume move session", - }, cli.BoolFlag{ Name: "preserve, a", Usage: "preserve filesystem attributes (mode, ownership, timestamps)", @@ -74,7 +69,7 @@ var mvCmd = cli.Command{ Action: mainMove, OnUsageError: onUsageError, Before: setGlobalsFromContext, - Flags: append(append(mvFlags, ioFlags...), globalFlags...), + Flags: append(append(mvFlags, encFlags...), globalFlags...), CustomHelpTemplate: `NAME: {{.HelpName}} - {{.Usage}} @@ -84,9 +79,10 @@ USAGE: FLAGS: {{range .VisibleFlags}}{{.}} {{end}} + ENVIRONMENT VARIABLES: - MC_ENCRYPT: list of comma delimited prefixes - MC_ENCRYPT_KEY: list of comma delimited prefix=secret values + MC_ENC_KMS: KMS encryption key in the form of (alias/prefix=key). + MC_ENC_S3: S3 encryption key in the form of (alias/prefix=key). EXAMPLES: 01. 
Move a list of objects from local file system to Amazon S3 cloud storage. @@ -113,30 +109,26 @@ EXAMPLES: 08. Move a local folder with space separated characters to Amazon S3 cloud storage. {{.Prompt}} {{.HelpName}} --recursive 'workdir/documents/May 2014/' s3/miniocloud - 09. Move a folder with encrypted objects recursively from Amazon S3 to MinIO cloud storage. - {{.Prompt}} {{.HelpName}} --recursive --encrypt-key "s3/documents/=32byteslongsecretkeymustbegiven1,myminio/documents/=32byteslongsecretkeymustbegiven2" s3/documents/ myminio/documents/ - - 10. Move a folder with encrypted objects recursively from Amazon S3 to MinIO cloud storage. In case the encryption key contains non-printable character like tab, pass the - base64 encoded string as key. - {{.Prompt}} {{.HelpName}} --recursive --encrypt-key "s3/documents/=MzJieXRlc2xvbmdzZWNyZWFiY2RlZmcJZ2l2ZW5uMjE=,myminio/documents/=MzJieXRlc2xvbmdzZWNyZWFiY2RlZmcJZ2l2ZW5uMjE=" s3/documents/ myminio/documents/ - - 11. Move a list of objects from local file system to MinIO cloud storage with specified metadata, separated by ";" + 09. Move a list of objects from local file system to MinIO cloud storage with specified metadata, separated by ";" {{.Prompt}} {{.HelpName}} --attr "key1=value1;key2=value2" Music/*.mp4 play/mybucket/ - 12. Move a folder recursively from MinIO cloud storage to Amazon S3 cloud storage with Cache-Control and custom metadata, separated by ";". + 10. Move a folder recursively from MinIO cloud storage to Amazon S3 cloud storage with Cache-Control and custom metadata, separated by ";". {{.Prompt}} {{.HelpName}} --attr "Cache-Control=max-age=90000,min-fresh=9000;key1=value1;key2=value2" --recursive play/mybucket/myfolder/ s3/mybucket/ - 13. Move a text file to an object storage and assign REDUCED_REDUNDANCY storage-class to the uploaded object. + 11. Move a text file to an object storage and assign REDUCED_REDUNDANCY storage-class to the uploaded object. {{.Prompt}} {{.HelpName}} --storage-class REDUCED_REDUNDANCY myobject.txt play/mybucket - 14. Move a text file to an object storage and create or resume copy session. - {{.Prompt}} {{.HelpName}} --recursive --continue dir/ play/mybucket - - 15. Move a text file to an object storage and preserve the file system attribute as metadata. + 12. Move a text file to an object storage and preserve the file system attribute as metadata. {{.Prompt}} {{.HelpName}} -a myobject.txt play/mybucket - 16. Move a text file to an object storage and disable multipart upload feature. + 13. Move a text file to an object storage and disable multipart upload feature. {{.Prompt}} {{.HelpName}} --disable-multipart myobject.txt play/mybucket + + 14. Move a folder using client provided encryption keys from Amazon S3 to MinIO cloud storage. + {{.Prompt}} {{.HelpName}} --r --enc-c "s3/documents/=MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MBB" --enc-c "myminio/documents/=MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDA" s3/documents/ myminio/documents/ + + 15. Move a folder using specific server managed encryption keys from Amazon S3 to MinIO cloud storage. + {{.Prompt}} {{.HelpName}} --r --enc-s3 "s3/documents/=my-s3-key" --enc-s3 "myminio/documents/=my-minio-key" s3/documents/ myminio/documents/ `, } @@ -211,19 +203,8 @@ func mainMove(cliCtx *cli.Context) error { ctx, cancelMove := context.WithCancel(globalContext) defer cancelMove() - // Parse encryption keys per command. - encKeyDB, err := getEncKeys(cliCtx) - fatalIf(err, "Unable to parse encryption keys.") - - // Parse metadata. 
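Examples 14 and 15 above bind one key per alias/prefix; `validateAndCreateEncryptionKeys` returns those bindings as a map keyed by prefix (the `map[string][]prefixSSEPair` type that appears elsewhere in this patch), and each object picks up the key of a matching prefix. The exact matching rule is not visible in these hunks, so the sketch below is only an assumption (longest-prefix-wins, with simplified types and a hypothetical `keyForObject` helper):

```go
package main

import (
	"fmt"
	"strings"
)

// keyForObject is illustrative only: it resolves an object URL to the key
// bound to the longest matching alias/prefix, one plausible resolution rule.
func keyForObject(object string, keysByPrefix map[string]string) (string, bool) {
	best, found := "", false
	for prefix := range keysByPrefix {
		if strings.HasPrefix(object, prefix) && len(prefix) > len(best) {
			best, found = prefix, true
		}
	}
	if !found {
		return "", false
	}
	return keysByPrefix[best], true
}

func main() {
	// Mirrors the shape of example 14: one key for the source prefix,
	// another for the destination prefix.
	keys := map[string]string{
		"s3/documents/":      "key-for-source",
		"myminio/documents/": "key-for-destination",
	}
	fmt.Println(keyForObject("s3/documents/2019/report.docx", keys))
}
```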
- userMetaMap := make(map[string]string) - if cliCtx.String("attr") != "" { - userMetaMap, err = getMetaDataEntry(cliCtx.String("attr")) - fatalIf(err, "Unable to parse attribute %v", cliCtx.String("attr")) - } - - // check 'copy' cli arguments. checkCopySyntax(cliCtx) + console.SetColor("Copy", color.New(color.FgGreen, color.Bold)) if cliCtx.NArg() == 2 { args := cliCtx.Args() @@ -234,63 +215,12 @@ func mainMove(cliCtx *cli.Context) error { } } - // Additional command speific theme customization. - console.SetColor("Copy", color.New(color.FgGreen, color.Bold)) - - recursive := cliCtx.Bool("recursive") - olderThan := cliCtx.String("older-than") - newerThan := cliCtx.String("newer-than") - storageClass := cliCtx.String("storage-class") - sseKeys := os.Getenv("MC_ENCRYPT_KEY") - if key := cliCtx.String("encrypt-key"); key != "" { - sseKeys = key - } - - if sseKeys != "" { - sseKeys, err = getDecodedKey(sseKeys) - fatalIf(err, "Unable to parse encryption keys.") - } - sse := cliCtx.String("encrypt") - - var session *sessionV8 - - if cliCtx.Bool("continue") { - sessionID := getHash("mv", cliCtx.Args()) - if isSessionExists(sessionID) { - session, err = loadSessionV8(sessionID) - fatalIf(err.Trace(sessionID), "Unable to load session.") - } else { - session = newSessionV8(sessionID) - session.Header.CommandType = "mv" - session.Header.CommandBoolFlags["recursive"] = recursive - session.Header.CommandStringFlags["older-than"] = olderThan - session.Header.CommandStringFlags["newer-than"] = newerThan - session.Header.CommandStringFlags["storage-class"] = storageClass - session.Header.CommandStringFlags["encrypt-key"] = sseKeys - session.Header.CommandStringFlags["encrypt"] = sse - session.Header.CommandBoolFlags["session"] = cliCtx.Bool("continue") - - if cliCtx.Bool("preserve") { - session.Header.CommandBoolFlags["preserve"] = cliCtx.Bool("preserve") - } - session.Header.UserMetaData = userMetaMap - session.Header.CommandBoolFlags["disable-multipart"] = cliCtx.Bool("disable-multipart") + var err *probe.Error - var e error - if session.Header.RootPath, e = os.Getwd(); e != nil { - session.Delete() - fatalIf(probe.NewError(e), "Unable to get current working folder.") - } - - // extract URLs. - session.Header.CommandArgs = cliCtx.Args() - } - } + encKeyDB, err := validateAndCreateEncryptionKeys(cliCtx) + fatalIf(err, "Unable to parse encryption keys.") - e := doCopySession(ctx, cancelMove, cliCtx, session, encKeyDB, true) - if session != nil { - session.Delete() - } + e := doCopySession(ctx, cancelMove, cliCtx, encKeyDB, true) console.Colorize("Copy", "Waiting for move operations to complete") rmManager.close() diff --git a/cmd/pipe-main.go b/cmd/pipe-main.go index e78d42a32f..2cfe3170f8 100644 --- a/cmd/pipe-main.go +++ b/cmd/pipe-main.go @@ -71,7 +71,7 @@ var pipeCmd = cli.Command{ Action: mainPipe, OnUsageError: onUsageError, Before: setGlobalsFromContext, - Flags: append(append(pipeFlags, ioFlags...), globalFlags...), + Flags: append(append(pipeFlags, encFlags...), globalFlags...), CustomHelpTemplate: `NAME: {{.HelpName}} - {{.Usage}} @@ -81,9 +81,10 @@ USAGE: FLAGS: {{range .VisibleFlags}}{{.}} {{end}}{{end}} + ENVIRONMENT VARIABLES: - MC_ENCRYPT: list of comma delimited prefix values - MC_ENCRYPT_KEY: list of comma delimited prefix=secret values + MC_ENC_KMS: KMS encryption key in the form of (alias/prefix=key). + MC_ENC_S3: S3 encryption key in the form of (alias/prefix=key). EXAMPLES: 1. Write contents of stdin to a file on local filesystem. @@ -95,16 +96,19 @@ EXAMPLES: 3. 
Copy an ISO image to an object on Amazon S3 cloud storage. {{.Prompt}} cat debian-8.2.iso | {{.HelpName}} s3/opensource-isos/gnuos.iso - 4. Stream MySQL database dump to Amazon S3 directly. + 4. Copy an ISO image to an object on minio storage using KMS encryption. + {{.Prompt}} cat debian-8.2.iso | {{.HelpName}} --enc-kms="minio/opensource-isos=my-key-name" minio/opensource-isos/gnuos.iso + + 5. Stream MySQL database dump to Amazon S3 directly. {{.Prompt}} mysqldump -u root -p ******* accountsdb | {{.HelpName}} s3/sql-backups/backups/accountsdb-oct-9-2015.sql - 5. Write contents of stdin to an object on Amazon S3 cloud storage and assign REDUCED_REDUNDANCY storage-class to the uploaded object. + 6. Write contents of stdin to an object on Amazon S3 cloud storage and assign REDUCED_REDUNDANCY storage-class to the uploaded object. {{.Prompt}} {{.HelpName}} --storage-class REDUCED_REDUNDANCY s3/personalbuck/meeting-notes.txt - 6. Copy to MinIO cloud storage with specified metadata, separated by ";" + 7. Copy to MinIO cloud storage with specified metadata, separated by ";" {{.Prompt}} cat music.mp3 | {{.HelpName}} --attr "Cache-Control=max-age=90000,min-fresh=9000;Artist=Unknown" play/mybucket/music.mp3 - 7. Set tags to the uploaded objects + 8. Set tags to the uploaded objects {{.Prompt}} tar cvf - . | {{.HelpName}} --tags "category=prod&type=backup" play/mybucket/backup.tar `, } @@ -182,12 +186,9 @@ func checkPipeSyntax(ctx *cli.Context) { func mainPipe(ctx *cli.Context) error { // validate pipe input arguments. checkPipeSyntax(ctx) - // Parse encryption keys per command. - encKeyDB, err := getEncKeys(ctx) - fatalIf(err, "Unable to parse encryption keys.") - // validate pipe input arguments. - checkPipeSyntax(ctx) + encKeyDB, err := validateAndCreateEncryptionKeys(ctx) + fatalIf(err, "Unable to parse encryption keys.") // globalQuiet is true for no window size to get. We just need --quiet here. quiet := ctx.IsSet("quiet") diff --git a/cmd/put-main.go b/cmd/put-main.go index 61bae29fff..7cfec5a9ce 100644 --- a/cmd/put-main.go +++ b/cmd/put-main.go @@ -51,7 +51,7 @@ var putCmd = cli.Command{ Action: mainPut, OnUsageError: onUsageError, Before: setGlobalsFromContext, - Flags: append(append(ioFlags, globalFlags...), putFlags...), + Flags: append(append(encFlags, globalFlags...), putFlags...), CustomHelpTemplate: `NAME: {{.HelpName}} - {{.Usage}} @@ -61,17 +61,26 @@ USAGE: FLAGS: {{range .VisibleFlags}}{{.}} {{end}} + ENVIRONMENT VARIABLES: - MC_ENCRYPT: list of comma delimited prefixes - MC_ENCRYPT_KEY: list of comma delimited prefix=secret values + MC_ENC_KMS: KMS encryption key in the form of (alias/prefix=key). + MC_ENC_S3: S3 encryption key in the form of (alias/prefix=key). EXAMPLES: 1. Put an object from local file system to S3 storage - {{.Prompt}} {{.HelpName}} path-to/object ALIAS/BUCKET + {{.Prompt}} {{.HelpName}} path-to/object play/mybucket + 2. Put an object from local file system to S3 bucket with name - {{.Prompt}} {{.HelpName}} path-to/object ALIAS/BUCKET/OBJECT-NAME + {{.Prompt}} {{.HelpName}} path-to/object play/mybucket/object + 3. Put an object from local file system to S3 bucket under a prefix - {{.Prompt}} {{.HelpName}} path-to/object ALIAS/BUCKET/PREFIX/ + {{.Prompt}} {{.HelpName}} path-to/object play/mybucket/object-prefix/ + + 4. Put an object to MinIO storage using sse-c encryption + {{.Prompt}} {{.HelpName}} --enc-c "play/mybucket/object=MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDA" path-to/object play/mybucket/object + + 5. 
Put an object to MinIO storage using sse-kms encryption + {{.Prompt}} {{.HelpName}} --enc-kms path-to/object play/mybucket/object `, } @@ -99,8 +108,12 @@ func mainPut(cliCtx *cli.Context) (e error) { fatalIf(errInvalidArgument().Trace(strconv.Itoa(threads)), "Invalid number of threads") } - encKeyDB, err := getEncKeys(cliCtx) - fatalIf(err, "Unable to parse encryption keys.") + // Parse encryption keys per command. + encryptionKeys, err := validateAndCreateEncryptionKeys(cliCtx) + if err != nil { + err.Trace(cliCtx.Args()...) + } + fatalIf(err, "SSE Error") if len(args) < 2 { fatalIf(errInvalidArgument().Trace(args...), "Invalid number of arguments.") @@ -125,7 +138,7 @@ func mainPut(cliCtx *cli.Context) (e error) { opts := prepareCopyURLsOpts{ sourceURLs: sourceURLs, targetURL: targetURL, - encKeyDB: encKeyDB, + encKeyDB: encryptionKeys, ignoreBucketExistsCheck: true, } @@ -159,7 +172,7 @@ func mainPut(cliCtx *cli.Context) (e error) { urls := doCopy(ctx, doCopyOpts{ cpURLs: putURLs, pg: pg, - encKeyDB: encKeyDB, + encryptionKeys: encryptionKeys, multipartSize: size, multipartThreads: strconv.Itoa(threads), }) diff --git a/cmd/rm-main.go b/cmd/rm-main.go index 502420fbe6..a44267b183 100644 --- a/cmd/rm-main.go +++ b/cmd/rm-main.go @@ -111,7 +111,7 @@ var rmCmd = cli.Command{ Action: mainRm, OnUsageError: onUsageError, Before: setGlobalsFromContext, - Flags: append(append(rmFlags, ioFlags...), globalFlags...), + Flags: append(rmFlags, globalFlags...), CustomHelpTemplate: `NAME: {{.HelpName}} - {{.Usage}} @@ -121,8 +121,6 @@ USAGE: FLAGS: {{range .VisibleFlags}}{{.}} {{end}} -ENVIRONMENT VARIABLES: - MC_ENCRYPT_KEY: list of comma delimited prefix=secret values EXAMPLES: 01. Remove a file. @@ -152,16 +150,13 @@ EXAMPLES: 09. Drop all incomplete uploads on the bucket 'jazz-songs'. {{.Prompt}} {{.HelpName}} --incomplete --recursive --force s3/jazz-songs/ - 10. Remove an encrypted object from Amazon S3 cloud storage. - {{.Prompt}} {{.HelpName}} --encrypt-key "s3/sql-backups/=32byteslongsecretkeymustbegiven1" s3/sql-backups/1999/old-backup.tgz - - 11. Bypass object retention in governance mode and delete the object. + 10. Bypass object retention in governance mode and delete the object. {{.Prompt}} {{.HelpName}} --bypass s3/pop-songs/ - 12. Remove a particular version ID. + 11. Remove a particular version ID. {{.Prompt}} {{.HelpName}} s3/docs/money.xls --version-id "f20f3792-4bd4-4288-8d3c-b9d05b3b62f6" - 13. Remove all object versions older than one year. + 12. Remove all object versions older than one year. {{.Prompt}} {{.HelpName}} s3/docs/ --recursive --versions --rewind 365d 14. Perform a fake removal of object(s) versions that are non-current and older than 10 days. If top-level version is a delete @@ -211,7 +206,7 @@ func (r rmMessage) JSON() string { } // Validate command line arguments. -func checkRmSyntax(ctx context.Context, cliCtx *cli.Context, encKeyDB map[string][]prefixSSEPair) { +func checkRmSyntax(ctx context.Context, cliCtx *cli.Context) { // Set command flags from context. isForce := cliCtx.Bool("force") isRecursive := cliCtx.Bool("recursive") @@ -255,7 +250,7 @@ func checkRmSyntax(ctx context.Context, cliCtx *cli.Context, encKeyDB map[string // Note: UNC path using / works properly in go 1.9.2 even though it breaks the UNC specification. url = filepath.ToSlash(filepath.Clean(url)) // namespace removal applies only for non FS. 
So filter out if passed url represents a directory - dir, _ := isAliasURLDir(ctx, url, encKeyDB, time.Time{}, false) + dir, _ := isAliasURLDir(ctx, url, nil, time.Time{}, false) if dir { _, path := url2Alias(url) isNamespaceRemoval = (path == "") @@ -312,7 +307,6 @@ func removeSingle(url, versionID string, opts removeOpts) error { urlStr: url, versionID: versionID, fileAttr: false, - encKeyDB: opts.encKeyDB, timeRef: time.Time{}, isZip: false, ignoreBucketExistsCheck: false, @@ -407,7 +401,6 @@ type removeOpts struct { isForceDel bool olderThan string newerThan string - encKeyDB map[string][]prefixSSEPair } func printDryRunMsg(targetAlias string, content *ClientContent, printModTime bool) { @@ -707,14 +700,8 @@ func mainRm(cliCtx *cli.Context) error { ctx, cancelRm := context.WithCancel(globalContext) defer cancelRm() - // Parse encryption keys per command. - encKeyDB, err := getEncKeys(cliCtx) - fatalIf(err, "Unable to parse encryption keys.") - - // check 'rm' cli arguments. - checkRmSyntax(ctx, cliCtx, encKeyDB) + checkRmSyntax(ctx, cliCtx) - // rm specific flags. isIncomplete := cliCtx.Bool("incomplete") isRecursive := cliCtx.Bool("recursive") isFake := cliCtx.Bool("dry-run") || cliCtx.Bool("fake") @@ -752,7 +739,6 @@ func mainRm(cliCtx *cli.Context) error { isBypass: isBypass, olderThan: olderThan, newerThan: newerThan, - encKeyDB: encKeyDB, }) } else { e = removeSingle(url, versionID, removeOpts{ @@ -763,7 +749,6 @@ func mainRm(cliCtx *cli.Context) error { isBypass: isBypass, olderThan: olderThan, newerThan: newerThan, - encKeyDB: encKeyDB, }) } if rerr == nil { @@ -790,7 +775,6 @@ func mainRm(cliCtx *cli.Context) error { isBypass: isBypass, olderThan: olderThan, newerThan: newerThan, - encKeyDB: encKeyDB, }) } else { e = removeSingle(url, versionID, removeOpts{ @@ -801,7 +785,6 @@ func mainRm(cliCtx *cli.Context) error { isBypass: isBypass, olderThan: olderThan, newerThan: newerThan, - encKeyDB: encKeyDB, }) } if rerr == nil { diff --git a/cmd/scan-bar.go b/cmd/scan-bar.go deleted file mode 100644 index d94958372f..0000000000 --- a/cmd/scan-bar.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) 2015-2022 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package cmd - -import ( - "fmt" - "strings" - - "github.com/dustin/go-humanize" - "github.com/minio/pkg/v2/console" -) - -// fixateScanBar truncates or stretches text to fit within the terminal size. -func fixateScanBar(text string, width int) string { - if len([]rune(text)) > width { - // Trim text to fit within the screen - trimSize := len([]rune(text)) - width + 3 //"..." - if trimSize < len([]rune(text)) { - text = "..." + text[trimSize:] - } - } else { - text += strings.Repeat(" ", width-len([]rune(text))) - } - return text -} - -// Progress bar function report objects being scaned. 
-type scanBarFunc func(string) - -// scanBarFactory returns a progress bar function to report URL scanning. -func scanBarFactory() scanBarFunc { - fileCount := 0 - - // Cursor animate channel. - cursorCh := cursorAnimate() - return func(source string) { - scanPrefix := fmt.Sprintf("[%s] %s ", humanize.Comma(int64(fileCount)), <-cursorCh) - source = fixateScanBar(source, globalTermWidth-len([]rune(scanPrefix))) - barText := scanPrefix + source - console.PrintC("\r" + barText + "\r") - fileCount++ - } -} diff --git a/cmd/session-migrate.go b/cmd/session-migrate.go deleted file mode 100644 index 53079e0d8c..0000000000 --- a/cmd/session-migrate.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright (c) 2015-2022 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package cmd - -import ( - "os" - "strconv" - - "github.com/minio/mc/pkg/probe" - "github.com/minio/pkg/v2/console" - "github.com/minio/pkg/v2/quick" -) - -// Migrates session header version '7' to '8'. The only -// change was the adding of insecure global flag -func migrateSessionV7ToV8() { - for _, sid := range getSessionIDs() { - sV7, err := loadSessionV7(sid) - if err != nil { - if os.IsNotExist(err.ToGoError()) { - continue - } - fatalIf(err.Trace(sid), "Unable to load version `7`. Migration failed please report this issue at https://github.com/minio/mc/issues.") - } - - // Close underlying session data file. - sV7.DataFP.Close() - - sessionVersion, e := strconv.Atoi(sV7.Header.Version) - fatalIf(probe.NewError(e), "Unable to load version `7`. Migration failed please report this issue at https://github.com/minio/mc/issues.") - if sessionVersion > 7 { // It is new format. - continue - } - - sessionFile, err := getSessionFile(sid) - fatalIf(err.Trace(sid), "Unable to get session file.") - - // Initialize v7 header and migrate to new config. 
- sV8Header := &sessionV8Header{} - sV8Header.Version = globalSessionConfigVersion - sV8Header.When = sV7.Header.When - sV8Header.RootPath = sV7.Header.RootPath - sV8Header.GlobalBoolFlags = sV7.Header.GlobalBoolFlags - sV8Header.GlobalIntFlags = sV7.Header.GlobalIntFlags - sV8Header.GlobalStringFlags = sV7.Header.GlobalStringFlags - sV8Header.CommandType = sV7.Header.CommandType - sV8Header.CommandArgs = sV7.Header.CommandArgs - sV8Header.CommandBoolFlags = sV7.Header.CommandBoolFlags - sV8Header.CommandIntFlags = sV7.Header.CommandIntFlags - sV8Header.CommandStringFlags = sV7.Header.CommandStringFlags - sV8Header.LastCopied = sV7.Header.LastCopied - sV8Header.LastRemoved = sV7.Header.LastRemoved - sV8Header.TotalBytes = sV7.Header.TotalBytes - sV8Header.TotalObjects = int64(sV7.Header.TotalObjects) - - // Add insecure flag to the new V8 header - sV8Header.GlobalBoolFlags["insecure"] = false - - qs, e := quick.NewConfig(sV8Header, nil) - fatalIf(probe.NewError(e).Trace(sid), "Unable to initialize quick config for session '8' header.") - - e = qs.Save(sessionFile) - fatalIf(probe.NewError(e).Trace(sid, sessionFile), "Unable to migrate session from '7' to '8'.") - - console.Println("Successfully migrated `" + sessionFile + "` from version `" + sV7.Header.Version + "` to " + "`" + sV8Header.Version + "`.") - } -} - -// Migrates session header version '6' to '7'. Only change is -// LastRemoved field which was added in version '7'. -func migrateSessionV6ToV7() { - for _, sid := range getSessionIDs() { - sV6Header, err := loadSessionV6Header(sid) - if err != nil { - if os.IsNotExist(err.ToGoError()) { - continue - } - fatalIf(err.Trace(sid), "Unable to load version `6`. Migration failed please report this issue at https://github.com/minio/mc/issues.") - } - - sessionVersion, e := strconv.Atoi(sV6Header.Version) - fatalIf(probe.NewError(e), "Unable to load version `6`. Migration failed please report this issue at https://github.com/minio/mc/issues.") - if sessionVersion > 6 { // It is new format. - continue - } - - sessionFile, err := getSessionFile(sid) - fatalIf(err.Trace(sid), "Unable to get session file.") - - // Initialize v7 header and migrate to new config. - sV7Header := &sessionV7Header{} - sV7Header.Version = "7" - sV7Header.When = sV6Header.When - sV7Header.RootPath = sV6Header.RootPath - sV7Header.GlobalBoolFlags = sV6Header.GlobalBoolFlags - sV7Header.GlobalIntFlags = sV6Header.GlobalIntFlags - sV7Header.GlobalStringFlags = sV6Header.GlobalStringFlags - sV7Header.CommandType = sV6Header.CommandType - sV7Header.CommandArgs = sV6Header.CommandArgs - sV7Header.CommandBoolFlags = sV6Header.CommandBoolFlags - sV7Header.CommandIntFlags = sV6Header.CommandIntFlags - sV7Header.CommandStringFlags = sV6Header.CommandStringFlags - sV7Header.LastCopied = sV6Header.LastCopied - sV7Header.LastRemoved = "" - sV7Header.TotalBytes = sV6Header.TotalBytes - sV7Header.TotalObjects = sV6Header.TotalObjects - - qs, e := quick.NewConfig(sV7Header, nil) - fatalIf(probe.NewError(e).Trace(sid), "Unable to initialize quick config for session '7' header.") - - e = qs.Save(sessionFile) - fatalIf(probe.NewError(e).Trace(sid, sessionFile), "Unable to migrate session from '6' to '7'.") - - console.Println("Successfully migrated `" + sessionFile + "` from version `" + sV6Header.Version + "` to " + "`" + sV7Header.Version + "`.") - } -} - -// Migrate session version '5' to version '6', all older sessions are -// in-fact removed and not migrated. 
All session files from '6' and -// above should be migrated - See: migrateSessionV6ToV7(). -func migrateSessionV5ToV6() { - for _, sid := range getSessionIDs() { - sV6Header, err := loadSessionV6Header(sid) - if err != nil { - if os.IsNotExist(err.ToGoError()) { - continue - } - fatalIf(err.Trace(sid), "Unable to load version `6`. Migration failed please report this issue at https://github.com/minio/mc/issues.") - } - - sessionVersion, e := strconv.Atoi(sV6Header.Version) - fatalIf(probe.NewError(e), "Unable to load version `6`. Migration failed please report this issue at https://github.com/minio/mc/issues.") - if sessionVersion > 5 { // It is new format. - continue - } - - /*** Remove all session files older than v6 ***/ - - sessionFile, err := getSessionFile(sid) - fatalIf(err.Trace(sid), "Unable to get session file.") - - sessionDataFile, err := getSessionDataFile(sid) - fatalIf(err.Trace(sid), "Unable to get session data file.") - - console.Println("Removing unsupported session file `" + sessionFile + "` version `" + sV6Header.Version + "`.") - if e := os.Remove(sessionFile); e != nil { - fatalIf(probe.NewError(e), "Unable to remove version `"+sV6Header.Version+"` session file `"+sessionFile+"`.") - } - if e := os.Remove(sessionDataFile); e != nil { - fatalIf(probe.NewError(e), "Unable to remove version `"+sV6Header.Version+"` session data file `"+sessionDataFile+"`.") - } - } -} diff --git a/cmd/session-old.go b/cmd/session-old.go deleted file mode 100644 index 30daa8e698..0000000000 --- a/cmd/session-old.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright (c) 2015-2022 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package cmd - -import ( - "os" - "sync" - "time" - - "github.com/minio/mc/pkg/probe" - "github.com/minio/pkg/v2/quick" -) - -// ///////////////// Session V6 /////////////////// -// sessionV6Header for resumable sessions. 
-type sessionV6Header struct { - Version string `json:"version"` - When time.Time `json:"time"` - RootPath string `json:"workingFolder"` - GlobalBoolFlags map[string]bool `json:"globalBoolFlags"` - GlobalIntFlags map[string]int `json:"globalIntFlags"` - GlobalStringFlags map[string]string `json:"globalStringFlags"` - CommandType string `json:"commandType"` - CommandArgs []string `json:"cmdArgs"` - CommandBoolFlags map[string]bool `json:"cmdBoolFlags"` - CommandIntFlags map[string]int `json:"cmdIntFlags"` - CommandStringFlags map[string]string `json:"cmdStringFlags"` - LastCopied string `json:"lastCopied"` - TotalBytes int64 `json:"totalBytes"` - TotalObjects int `json:"totalObjects"` -} - -func loadSessionV6Header(sid string) (*sessionV6Header, *probe.Error) { - if !isSessionDirExists() { - return nil, errInvalidArgument().Trace() - } - - sessionFile, err := getSessionFile(sid) - if err != nil { - return nil, err.Trace(sid) - } - - if _, e := os.Stat(sessionFile); e != nil { - return nil, probe.NewError(e) - } - - sV6Header := &sessionV6Header{} - sV6Header.Version = "6" - qs, e := quick.NewConfig(sV6Header, nil) - if e != nil { - return nil, probe.NewError(e).Trace(sid, sV6Header.Version) - } - e = qs.Load(sessionFile) - if e != nil { - return nil, probe.NewError(e).Trace(sid, sV6Header.Version) - } - - sV6Header = qs.Data().(*sessionV6Header) - return sV6Header, nil -} - -/////////////////// Session V7 /////////////////// -// RESERVED FOR FUTURE - -// sessionV7Header for resumable sessions. -type sessionV7Header struct { - Version string `json:"version"` - When time.Time `json:"time"` - RootPath string `json:"workingFolder"` - GlobalBoolFlags map[string]bool `json:"globalBoolFlags"` - GlobalIntFlags map[string]int `json:"globalIntFlags"` - GlobalStringFlags map[string]string `json:"globalStringFlags"` - CommandType string `json:"commandType"` - CommandArgs []string `json:"cmdArgs"` - CommandBoolFlags map[string]bool `json:"cmdBoolFlags"` - CommandIntFlags map[string]int `json:"cmdIntFlags"` - CommandStringFlags map[string]string `json:"cmdStringFlags"` - LastCopied string `json:"lastCopied"` - LastRemoved string `json:"lastRemoved"` - TotalBytes int64 `json:"totalBytes"` - TotalObjects int `json:"totalObjects"` -} - -// sessionV7 resumable session container. 
-type sessionV7 struct { - Header *sessionV7Header - SessionID string - mutex *sync.Mutex - DataFP *sessionDataFP -} - -// loadSessionV7 - reads session file if exists and re-initiates internal variables -func loadSessionV7(sid string) (*sessionV7, *probe.Error) { - if !isSessionDirExists() { - return nil, errInvalidArgument().Trace() - } - sessionFile, err := getSessionFile(sid) - if err != nil { - return nil, err.Trace(sid) - } - - if _, e := os.Stat(sessionFile); e != nil { - return nil, probe.NewError(e) - } - - s := &sessionV7{} - s.Header = &sessionV7Header{} - s.SessionID = sid - s.Header.Version = "7" - qs, e := quick.NewConfig(s.Header, nil) - if e != nil { - return nil, probe.NewError(e).Trace(sid, s.Header.Version) - } - e = qs.Load(sessionFile) - if e != nil { - return nil, probe.NewError(e).Trace(sid, s.Header.Version) - } - - s.mutex = new(sync.Mutex) - s.Header = qs.Data().(*sessionV7Header) - - sessionDataFile, err := getSessionDataFile(s.SessionID) - if err != nil { - return nil, err.Trace(sid, s.Header.Version) - } - - dataFile, e := os.Open(sessionDataFile) - if e != nil { - return nil, probe.NewError(e) - } - s.DataFP = &sessionDataFP{false, dataFile} - - return s, nil -} diff --git a/cmd/session-v8.go b/cmd/session-v8.go deleted file mode 100644 index 65e428e091..0000000000 --- a/cmd/session-v8.go +++ /dev/null @@ -1,400 +0,0 @@ -// Copyright (c) 2015-2022 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -// Package cmd - session V8 - Version 8 stores session header and session data in -// two separate files. Session data contains fully prepared URL list. -package cmd - -import ( - "errors" - "fmt" - "io" - "os" - "strings" - "sync" - "time" - - json "github.com/minio/colorjson" - "github.com/minio/mc/pkg/probe" - "github.com/minio/pkg/v2/console" - "github.com/minio/pkg/v2/quick" -) - -// sessionV8Header for resumable sessions. 
-type sessionV8Header struct { - Version string `json:"version"` - When time.Time `json:"time"` - RootPath string `json:"workingFolder"` - GlobalBoolFlags map[string]bool `json:"globalBoolFlags"` - GlobalIntFlags map[string]int `json:"globalIntFlags"` - GlobalStringFlags map[string]string `json:"globalStringFlags"` - CommandType string `json:"commandType"` - CommandArgs []string `json:"cmdArgs"` - CommandBoolFlags map[string]bool `json:"cmdBoolFlags"` - CommandIntFlags map[string]int `json:"cmdIntFlags"` - CommandStringFlags map[string]string `json:"cmdStringFlags"` - LastCopied string `json:"lastCopied"` - LastRemoved string `json:"lastRemoved"` - TotalBytes int64 `json:"totalBytes"` - TotalObjects int64 `json:"totalObjects"` - UserMetaData map[string]string `json:"metaData"` -} - -// sessionMessage container for session messages -type sessionMessage struct { - Status string `json:"status"` - SessionID string `json:"sessionId"` - Time time.Time `json:"time"` - CommandType string `json:"commandType"` - CommandArgs []string `json:"commandArgs"` -} - -// sessionV8 resumable session container. -type sessionV8 struct { - Header *sessionV8Header - SessionID string - mutex *sync.Mutex - DataFP *sessionDataFP -} - -// sessionDataFP data file pointer. -type sessionDataFP struct { - dirty bool - *os.File -} - -func (file *sessionDataFP) Write(p []byte) (int, error) { - file.dirty = true - return file.File.Write(p) -} - -// String colorized session message. -func (s sessionV8) String() string { - message := console.Colorize("SessionID", fmt.Sprintf("%s -> ", s.SessionID)) - message = message + console.Colorize("SessionTime", fmt.Sprintf("[%s]", s.Header.When.Local().Format(printDate))) - message = message + console.Colorize("Command", fmt.Sprintf(" %s %s", s.Header.CommandType, strings.Join(s.Header.CommandArgs, " "))) - return message -} - -// JSON jsonified session message. -func (s sessionV8) JSON() string { - sessionMsg := sessionMessage{ - SessionID: s.SessionID, - Time: s.Header.When.Local(), - CommandType: s.Header.CommandType, - CommandArgs: s.Header.CommandArgs, - } - sessionMsg.Status = "success" - sessionBytes, e := json.MarshalIndent(sessionMsg, "", " ") - fatalIf(probe.NewError(e), "Unable to marshal into JSON.") - - return string(sessionBytes) -} - -// loadSessionV8 - reads session file if exists and re-initiates internal variables -func loadSessionV8(sid string) (*sessionV8, *probe.Error) { - if !isSessionDirExists() { - return nil, errInvalidArgument().Trace() - } - sessionFile, err := getSessionFile(sid) - if err != nil { - return nil, err.Trace(sid) - } - - if _, e := os.Stat(sessionFile); e != nil { - return nil, probe.NewError(e) - } - - // Initialize new session. - s := &sessionV8{ - Header: &sessionV8Header{ - Version: globalSessionConfigVersion, - }, - SessionID: sid, - } - - // Initialize session config loader. - qs, e := quick.NewConfig(s.Header, nil) - if e != nil { - return nil, probe.NewError(e).Trace(sid, s.Header.Version) - } - - if e = qs.Load(sessionFile); e != nil { - return nil, probe.NewError(e).Trace(sid, s.Header.Version) - } - - // Validate if the version matches with expected current version. 
- sV8Header := qs.Data().(*sessionV8Header) - if sV8Header.Version != globalSessionConfigVersion { - msg := fmt.Sprintf("Session header version %s does not match mc session version %s.\n", - sV8Header.Version, globalSessionConfigVersion) - return nil, probe.NewError(errors.New(msg)).Trace(sid, sV8Header.Version) - } - - s.mutex = new(sync.Mutex) - s.Header = sV8Header - - sessionDataFile, err := getSessionDataFile(s.SessionID) - if err != nil { - return nil, err.Trace(sid, s.Header.Version) - } - - dataFile, e := os.Open(sessionDataFile) - if e != nil { - return nil, probe.NewError(e) - } - s.DataFP = &sessionDataFP{false, dataFile} - - return s, nil -} - -// newSessionV8 provides a new session. -func newSessionV8(sessionID string) *sessionV8 { - s := &sessionV8{} - s.Header = &sessionV8Header{} - s.Header.Version = globalSessionConfigVersion - // map of command and files copied. - s.Header.GlobalBoolFlags = make(map[string]bool) - s.Header.GlobalIntFlags = make(map[string]int) - s.Header.GlobalStringFlags = make(map[string]string) - s.Header.CommandArgs = nil - s.Header.CommandBoolFlags = make(map[string]bool) - s.Header.CommandIntFlags = make(map[string]int) - s.Header.CommandStringFlags = make(map[string]string) - s.Header.UserMetaData = make(map[string]string) - s.Header.When = UTCNow() - s.mutex = new(sync.Mutex) - s.SessionID = sessionID - - sessionDataFile, err := getSessionDataFile(s.SessionID) - fatalIf(err.Trace(s.SessionID), "Unable to create session data file \""+sessionDataFile+"\".") - - dataFile, e := os.Create(sessionDataFile) - fatalIf(probe.NewError(e), "Unable to create session data file \""+sessionDataFile+"\".") - - s.DataFP = &sessionDataFP{false, dataFile} - - // Capture state of global flags. - s.setGlobals() - - return s -} - -// HasData provides true if this is a session resume, false otherwise. -func (s sessionV8) HasData() bool { - return s.Header.LastCopied != "" || s.Header.LastRemoved != "" -} - -// NewDataReader provides reader interface to session data file. -func (s *sessionV8) NewDataReader() io.Reader { - // DataFP is always intitialized, either via new or load functions. - s.DataFP.Seek(0, io.SeekStart) - return io.Reader(s.DataFP) -} - -// NewDataReader provides writer interface to session data file. -func (s *sessionV8) NewDataWriter() io.Writer { - // DataFP is always intitialized, either via new or load functions. - s.DataFP.Seek(0, io.SeekStart) - // when moving to file position 0 we want to truncate the file as well, - // otherwise we'll partly overwrite existing data - s.DataFP.Truncate(0) - return io.Writer(s.DataFP) -} - -// Save this session. -func (s *sessionV8) Save() *probe.Error { - s.mutex.Lock() - defer s.mutex.Unlock() - - if s.DataFP.dirty { - if err := s.DataFP.Sync(); err != nil { - return probe.NewError(err) - } - s.DataFP.dirty = false - } - - qs, e := quick.NewConfig(s.Header, nil) - if e != nil { - return probe.NewError(e).Trace(s.SessionID) - } - - sessionFile, err := getSessionFile(s.SessionID) - if err != nil { - return err.Trace(s.SessionID) - } - e = qs.Save(sessionFile) - if e != nil { - return probe.NewError(e).Trace(sessionFile) - } - return nil -} - -// setGlobals captures the state of global variables into session header. -// Used by newSession. 
-func (s *sessionV8) setGlobals() { - s.Header.GlobalBoolFlags["quiet"] = globalQuiet - s.Header.GlobalBoolFlags["debug"] = globalDebug - s.Header.GlobalBoolFlags["json"] = globalJSON - s.Header.GlobalBoolFlags["noColor"] = globalNoColor - s.Header.GlobalBoolFlags["insecure"] = globalInsecure -} - -// IsModified - returns if in memory session header has changed from -// its on disk value. -func (s *sessionV8) isModified(sessionFile string) (bool, *probe.Error) { - qs, e := quick.NewConfig(s.Header, nil) - if e != nil { - return false, probe.NewError(e).Trace(s.SessionID) - } - - currentHeader := &sessionV8Header{} - currentQS, e := quick.LoadConfig(sessionFile, nil, currentHeader) - if e != nil { - // If session does not exist for the first, return modified to - // be true. - if os.IsNotExist(e) { - return true, nil - } - // For all other errors return. - return false, probe.NewError(e).Trace(s.SessionID) - } - - changedFields, e := qs.DeepDiff(currentQS) - if e != nil { - return false, probe.NewError(e).Trace(s.SessionID) - } - - // Returns true if there are changed entries. - return len(changedFields) > 0, nil -} - -// save - wrapper for quick.Save and saves only if sessionHeader is -// modified. -func (s *sessionV8) save() *probe.Error { - sessionFile, err := getSessionFile(s.SessionID) - if err != nil { - return err.Trace(s.SessionID) - } - - // Verify if sessionFile is modified. - modified, err := s.isModified(sessionFile) - if err != nil { - return err.Trace(s.SessionID) - } - // Header is modified, we save it. - if modified { - qs, e := quick.NewConfig(s.Header, nil) - if e != nil { - return probe.NewError(e).Trace(s.SessionID) - } - // Save an return. - e = qs.Save(sessionFile) - if e != nil { - return probe.NewError(e).Trace(sessionFile) - } - } - return nil -} - -// Close ends this session and removes all associated session files. -func (s *sessionV8) Close() *probe.Error { - s.mutex.Lock() - defer s.mutex.Unlock() - - if err := s.DataFP.Close(); err != nil { - return probe.NewError(err) - } - - // Attempt to save the header if modified. - return s.save() -} - -// Delete removes all the session files. -func (s *sessionV8) Delete() *probe.Error { - s.mutex.Lock() - defer s.mutex.Unlock() - - if s.DataFP != nil { - name := s.DataFP.Name() - // close file pro-actively before deleting - // ignore any error, it could be possibly that - // the file is closed already - s.DataFP.Close() - - // Remove the data file. - if e := os.Remove(name); e != nil { - return probe.NewError(e) - } - } - - // Fetch the session file. - sessionFile, err := getSessionFile(s.SessionID) - if err != nil { - return err.Trace(s.SessionID) - } - - // Remove session file - if e := os.Remove(sessionFile); e != nil { - return probe.NewError(e) - } - - // Remove session backup file if any, ignore any error. - os.Remove(sessionFile + ".old") - - return nil -} - -// Close a session and exit. -func (s sessionV8) CloseAndDie() { - s.Close() - console.Fatalln("Session safely terminated. Run the same command to resume copy again.") -} - -func (s sessionV8) copyCloseAndDie(sessionFlag bool) { - if sessionFlag { - s.Close() - console.Fatalln("Command terminated safely. Run this command to resume copy again.") - } else { - s.mutex.Lock() - defer s.mutex.Unlock() - - s.DataFP.Close() // ignore error. - } -} - -// Create a factory function to simplify checking if -// object was last operated on. 
-func isLastFactory(lastURL string) func(string) bool { - last := true // closure - return func(sourceURL string) bool { - if sourceURL == "" { - fatalIf(errInvalidArgument().Trace(), "Empty source argument passed.") - } - if lastURL == "" { - return false - } - - if last { - if lastURL == sourceURL { - last = false // from next call onwards we say false. - } - return true - } - return false - } -} diff --git a/cmd/session.go b/cmd/session.go deleted file mode 100644 index 0c2d01f880..0000000000 --- a/cmd/session.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright (c) 2015-2022 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package cmd - -import ( - "crypto/sha256" - "encoding/hex" - "os" - "path/filepath" - "strings" - - "github.com/minio/mc/pkg/probe" -) - -// migrateSession migrates all previous migration to latest. -func migrateSession() { - // We no longer support sessions older than v5. They will be removed. - migrateSessionV5ToV6() - - // Migrate V6 to V7. - migrateSessionV6ToV7() - - // Migrate V7 to V8 - migrateSessionV7ToV8() -} - -// createSessionDir - create session directory. -func createSessionDir() *probe.Error { - sessionDir, err := getSessionDir() - if err != nil { - return err.Trace() - } - - if e := os.MkdirAll(sessionDir, 0o700); e != nil { - return probe.NewError(e) - } - return nil -} - -// getSessionDir - get session directory. -func getSessionDir() (string, *probe.Error) { - configDir, err := getMcConfigDir() - if err != nil { - return "", err.Trace() - } - - sessionDir := filepath.Join(configDir, globalSessionDir) - return sessionDir, nil -} - -// isSessionDirExists - verify if session directory exists. -func isSessionDirExists() bool { - sessionDir, err := getSessionDir() - fatalIf(err.Trace(), "Unable to determine session folder.") - - if _, e := os.Stat(sessionDir); e != nil { - return false - } - return true -} - -// getSessionFile - get current session file. -func getSessionFile(sid string) (string, *probe.Error) { - sessionDir, err := getSessionDir() - if err != nil { - return "", err.Trace() - } - - sessionFile := filepath.Join(sessionDir, sid+".json") - return sessionFile, nil -} - -// isSessionExists verifies if given session exists. -func isSessionExists(sid string) bool { - sessionFile, err := getSessionFile(sid) - fatalIf(err.Trace(sid), "Unable to determine session filename for `"+sid+"`.") - - if _, e := os.Stat(sessionFile); e != nil { - return false - } - - return true // Session exists. -} - -// getSessionDataFile - get session data file for a given session. -func getSessionDataFile(sid string) (string, *probe.Error) { - sessionDir, err := getSessionDir() - if err != nil { - return "", err.Trace() - } - - sessionDataFile := filepath.Join(sessionDir, sid+".data") - return sessionDataFile, nil -} - -// getSessionIDs - get all active sessions. 
-func getSessionIDs() (sids []string) { - sessionDir, err := getSessionDir() - fatalIf(err.Trace(), "Unable to access session folder.") - - sessionList, e := filepath.Glob(sessionDir + "/*.json") - fatalIf(probe.NewError(e), "Unable to access session folder `"+sessionDir+"`.") - - for _, path := range sessionList { - sids = append(sids, strings.TrimSuffix(filepath.Base(path), ".json")) - } - return sids -} - -func getHash(prefix string, args []string) string { - hasher := sha256.New() - for _, arg := range args { - if _, err := hasher.Write([]byte(arg)); err != nil { - panic(err) - } - } - - return prefix + "-" + hex.EncodeToString(hasher.Sum(nil)) -} diff --git a/cmd/session_test.go b/cmd/session_test.go deleted file mode 100644 index d0f0cbff11..0000000000 --- a/cmd/session_test.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) 2015-2022 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package cmd - -import ( - "math/rand" - "os" - "regexp" - - checkv1 "gopkg.in/check.v1" -) - -var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") - -// newRandomID generates a random id of regular lower case and uppercase english characters. -func newRandomID(n int) string { - sid := make([]rune, n) - for i := range sid { - sid[i] = letters[rand.Intn(len(letters))] - } - return string(sid) -} - -func (s *TestSuite) TestValidSessionID(c *checkv1.C) { - validSid := regexp.MustCompile("^[a-zA-Z]+$") - sid := newRandomID(8) - c.Assert(len(sid), checkv1.Equals, 8) - c.Assert(validSid.MatchString(sid), checkv1.Equals, true) -} - -func (s *TestSuite) TestSession(c *checkv1.C) { - err := createSessionDir() - c.Assert(err, checkv1.IsNil) - c.Assert(isSessionDirExists(), checkv1.Equals, true) - - session := newSessionV8(getHash("cp", []string{"mybucket", "myminio/mybucket"})) - c.Assert(session.Header.CommandArgs, checkv1.IsNil) - c.Assert(len(session.SessionID) >= 8, checkv1.Equals, true) - _, e := os.Stat(session.DataFP.Name()) - c.Assert(e, checkv1.IsNil) - - err = session.Close() - c.Assert(err, checkv1.IsNil) - c.Assert(isSessionExists(session.SessionID), checkv1.Equals, true) - - savedSession, err := loadSessionV8(session.SessionID) - c.Assert(err, checkv1.IsNil) - c.Assert(session.SessionID, checkv1.Equals, savedSession.SessionID) - - err = savedSession.Close() - c.Assert(err, checkv1.IsNil) - - err = savedSession.Delete() - c.Assert(err, checkv1.IsNil) - c.Assert(isSessionExists(session.SessionID), checkv1.Equals, false) - _, e = os.Stat(session.DataFP.Name()) - c.Assert(e, checkv1.NotNil) -} diff --git a/cmd/share-download-main.go b/cmd/share-download-main.go index 62b1d3bd1c..f4475c5993 100644 --- a/cmd/share-download-main.go +++ b/cmd/share-download-main.go @@ -187,7 +187,7 @@ func doShareDownloadURL(ctx context.Context, targetURL, versionID string, isRecu // Make new entries to shareDB. 
contentType := "" // Not useful for download shares. shareDB.Set(objectURL, shareURL, expiry, contentType) - printMsg(shareMesssage{ + printMsg(shareMessage{ ObjectURL: objectURL, ShareURL: shareURL, TimeLeft: expiry, @@ -205,7 +205,7 @@ func mainShareDownload(cliCtx *cli.Context) error { defer cancelShareDownload() // Parse encryption keys per command. - encKeyDB, err := getEncKeys(cliCtx) + encKeyDB, err := validateAndCreateEncryptionKeys(cliCtx) fatalIf(err, "Unable to parse encryption keys.") // check input arguments. diff --git a/cmd/share-list-main.go b/cmd/share-list-main.go index 7d66239e82..c1f7786679 100644 --- a/cmd/share-list-main.go +++ b/cmd/share-list-main.go @@ -92,7 +92,7 @@ func doShareList(cmd string) *probe.Error { // Print previously shared entries. for shareURL, share := range shareDB.Shares { - printMsg(shareMesssage{ + printMsg(shareMessage{ ObjectURL: share.URL, ShareURL: shareURL, TimeLeft: share.Expiry - time.Since(share.Date), diff --git a/cmd/share-upload-main.go b/cmd/share-upload-main.go index 0cc5b43def..847631fe41 100644 --- a/cmd/share-upload-main.go +++ b/cmd/share-upload-main.go @@ -171,7 +171,7 @@ func doShareUploadURL(ctx context.Context, objectURL string, isRecursive bool, e return err.Trace(objectURL) } - printMsg(shareMesssage{ + printMsg(shareMessage{ ObjectURL: objectURL, ShareURL: curlCmd, TimeLeft: expiry, diff --git a/cmd/share.go b/cmd/share.go index 60cc4d4120..a8f40b6ec1 100644 --- a/cmd/share.go +++ b/cmd/share.go @@ -51,7 +51,7 @@ var ( ) // Structured share command message. -type shareMesssage struct { +type shareMessage struct { Status string `json:"status"` ObjectURL string `json:"url"` ShareURL string `json:"share"` @@ -60,7 +60,7 @@ type shareMesssage struct { } // String - Themefied string message for console printing. -func (s shareMesssage) String() string { +func (s shareMessage) String() string { msg := console.Colorize("URL", fmt.Sprintf("URL: %s\n", s.ObjectURL)) msg += console.Colorize("Expire", fmt.Sprintf("Expire: %s\n", timeDurationToHumanizedDuration(s.TimeLeft))) if s.ContentType != "" { @@ -78,7 +78,7 @@ func (s shareMesssage) String() string { } // JSON - JSONified message for scripting. -func (s shareMesssage) JSON() string { +func (s shareMessage) JSON() string { s.Status = "success" shareMessageBytes, e := json.MarshalIndent(s, "", " ") fatalIf(probe.NewError(e), "Unable to marshal into JSON.") diff --git a/cmd/sql-main.go b/cmd/sql-main.go index b9d511c1f7..20656ef4e7 100644 --- a/cmd/sql-main.go +++ b/cmd/sql-main.go @@ -80,7 +80,7 @@ var sqlCmd = cli.Command{ Action: mainSQL, OnUsageError: onUsageError, Before: setGlobalsFromContext, - Flags: append(append(sqlFlags, ioFlags...), globalFlags...), + Flags: append(append(sqlFlags, encCFlag), globalFlags...), CustomHelpTemplate: `NAME: {{.HelpName}} - {{.Usage}} @@ -90,8 +90,6 @@ USAGE: FLAGS: {{range .VisibleFlags}}{{.}} {{end}}{{end}} -ENVIRONMENT VARIABLES: - MC_ENCRYPT_KEY: list of comma delimited prefix=secret values SERIALIZATION OPTIONS: For query serialization options, refer to https://min.io/docs/minio/linux/reference/minio-mc/mc-sql.html#command-mc.sql @@ -104,7 +102,7 @@ EXAMPLES: {{.Prompt}} {{.HelpName}} --query "select count(s.power) from S3Object s" myminio/iot-devices/power-ratio.csv 3. Run a query on an encrypted object with customer provided keys. 
- {{.Prompt}} {{.HelpName}} --encrypt-key "myminio/iot-devices=32byteslongsecretkeymustbegiven1" \ + {{.Prompt}} {{.HelpName}} --enc-c "myminio/iot-devices=MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDA" \ --query "select count(s.power) from S3Object s" myminio/iot-devices/power-ratio-encrypted.csv 4. Run a query on an object on MinIO in gzip format using ; as field delimiter, @@ -445,7 +443,7 @@ func mainSQL(cliCtx *cli.Context) error { query string ) // Parse encryption keys per command. - encKeyDB, err := getEncKeys(cliCtx) + encKeyDB, err := validateAndCreateEncryptionKeys(cliCtx) fatalIf(err, "Unable to parse encryption keys.") // validate sql input arguments. diff --git a/cmd/stat-main.go b/cmd/stat-main.go index c01e62ccb4..2d36480dee 100644 --- a/cmd/stat-main.go +++ b/cmd/stat-main.go @@ -56,7 +56,7 @@ var statCmd = cli.Command{ Action: mainStat, OnUsageError: onUsageError, Before: setGlobalsFromContext, - Flags: append(append(statFlags, ioFlags...), globalFlags...), + Flags: append(append(statFlags, encCFlag), globalFlags...), CustomHelpTemplate: `NAME: {{.HelpName}} - {{.Usage}} @@ -66,8 +66,6 @@ USAGE: FLAGS: {{range .VisibleFlags}}{{.}} {{end}} -ENVIRONMENT VARIABLES: - MC_ENCRYPT_KEY: list of comma delimited prefix=secret values EXAMPLES: 1. Stat all contents of mybucket on Amazon S3 cloud storage. @@ -79,17 +77,14 @@ EXAMPLES: 3. Stat files recursively on a local filesystem on Microsoft Windows. {{.Prompt}} {{.HelpName}} --recursive C:\Users\mydocuments\ - 4. Stat encrypted files on Amazon S3 cloud storage. - {{.Prompt}} {{.HelpName}} --encrypt-key "s3/personal-docs/=32byteslongsecretkeymustbegiven1" s3/personal-docs/2018-account_report.docx - - 5. Stat encrypted files on Amazon S3 cloud storage. In case the encryption key contains non-printable character like tab, pass the + 4. Stat encrypted files on Amazon S3 cloud storage. In case the encryption key contains non-printable character like tab, pass the base64 encoded string as key. - {{.Prompt}} {{.HelpName}} --encrypt-key "s3/personal-document/=MzJieXRlc2xvbmdzZWNyZWFiY2RlZmcJZ2l2ZW5uMjE=" s3/personal-document/2019-account_report.docx + {{.Prompt}} {{.HelpName}} --enc-c "s3/personal-document/=MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDA" s3/personal-document/2019-account_report.docx - 6. Stat a specific object version. + 5. Stat a specific object version. {{.Prompt}} {{.HelpName}} --version-id "CL3sWgdSN2pNntSf6UnZAuh2kcu8E8si" s3/personal-docs/2018-account_report.docx - 7. Stat all objects versions recursively created before 1st January 2020. + 6. Stat all objects versions recursively created before 1st January 2020. {{.Prompt}} {{.HelpName}} --versions --rewind 2020.01.01T00:00 s3/personal-docs/ `, } @@ -154,7 +149,7 @@ func mainStat(cliCtx *cli.Context) error { console.SetColor("Count", color.New(color.FgGreen)) // Parse encryption keys per command. - encKeyDB, err := getEncKeys(cliCtx) + encKeyDB, err := validateAndCreateEncryptionKeys(cliCtx) fatalIf(err, "Unable to parse encryption keys.") // check 'stat' cli arguments. diff --git a/cmd/suite_test.go b/cmd/suite_test.go new file mode 100644 index 0000000000..9559ecd8e8 --- /dev/null +++ b/cmd/suite_test.go @@ -0,0 +1,2887 @@ +package cmd + +/**/ +import ( + "bytes" + "crypto/md5" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "log" + "math/rand" + "net/http" + "os" + "os/exec" + "runtime" + "runtime/debug" + "strconv" + "strings" + "testing" + "time" + + "github.com/google/uuid" + "github.com/minio/mc/pkg/disk" +) + +// RUN: go test -v ./... 
-run Test_FullSuite +func Test_FullSuite(t *testing.T) { + if os.Getenv("MC_TEST_RUN_FULL_SUITE") != "true" { + return + } + + defer func() { + r := recover() + if r != nil { + log.Println(r, string(debug.Stack())) + } + + postRunCleanup(t) + }() + + preflightCheck(t) + // initializeTestSuite builds the mc client and creates local files which are used for testing + initializeTestSuite(t) + + // Tests within this function depend on one another + testsThatDependOnOneAnother(t) + + // Alias tests + AddALIASWithError(t) + + // Basic admin user tests + AdminUserFunctionalTest(t) + + // Share upload/download + ShareURLUploadTest(t) + ShareURLDownloadTest(t) + + // TODO .. for some reason the connection is randomly + // reset when running curl. + // ShareURLUploadErrorTests(t) + + // Bucket Error Tests + CreateBucketUsingInvalidSymbols(t) + RemoveBucketWithNameTooLong(t) + RemoveBucketThatDoesNotExist(t) + + // MC_TEST_ENABLE_HTTPS=true + // needs to be set in order to run these tests + if protocol == "https://" { + PutObjectWithSSEC(t) + PutObjectWithSSECMultipart(t) + PutObjectWithSSECInvalidKeys(t) + GetObjectWithSSEC(t) + GetObjectWithSSECWithoutKey(t) + CatObjectWithSSEC(t) + CatObjectWithSSECWithoutKey(t) + CopyObjectWithSSECToNewBucketWithNewKey(t) + MirrorTempDirectoryUsingSSEC(t) + RemoveObjectWithSSEC(t) + } else { + PutObjectErrorWithSSECOverHTTP(t) + } + + // MC_TEST_KMS_KEY=[KEY_NAME] + // needs to be set in order to run these tests + if sseKMSKeyName != "" { + VerifyKMSKey(t) + PutObjectWithSSEKMS(t) + PutObjectWithSSEKMSMultipart(t) + PutObjectWithSSEKMSInvalidKeys(t) + GetObjectWithSSEKMS(t) + CatObjectWithSSEKMS(t) + CopyObjectWithSSEKMSToNewBucket(t) + MirrorTempDirectoryUsingSSEKMS(t) + RemoveObjectWithSSEKMS(t) + + // Error tests + CopyObjectWithSSEKMSWithOverLappingKeys(t) + } + + // MC_TEST_ENABLE_SSE_S3=true + // needs to be set to in order to run these tests. + if sseS3Enabled { + PutObjectWithSSES3(t) + PutObjectWithSSES3Multipart(t) + GetObjectWithSSES3(t) + CatObjectWithSSES3(t) + CopyObjectWithSSES3ToNewBucket(t) + MirrorTempDirectoryUsingSSES3(t) + } + + if protocol == "https://" && sseKMSKeyName != "" { + CopyObjectWithSSEKMSToNewBucketWithSSEC(t) + } + + // (DEPRECATED CLI PARAMETERS) + if includeDeprecatedMethods { + fmt.Println("No deprecated methods implemented") + } +} + +func testsThatDependOnOneAnother(t *testing.T) { + CreateFileBundle() + // uploadAllFiles uploads all files in FileMap to MainTestBucket + uploadAllFiles(t) + // LSObjects saves the output of LS inside *testFile in FileMap + LSObjects(t) + // StatObjecsts saves the output of Stat inside *testFile in FileMap + StatObjects(t) + // ValidateFileMetaDataPostUpload validates the output of LS and Stat + ValidateFileMetaData(t) + + // DU tests + DUBucket(t) + + // Std in/out .. 
pipe/cat + CatObjectToStdIn(t) + CatObjectFromStdin(t) + + // Preserve attributes + PutObjectPreserveAttributes(t) + + // Mirror + MirrorTempDirectoryStorageClassReducedRedundancy(t) + MirrorTempDirectory(t) + + // General object tests + FindObjects(t) + FindObjectsUsingName(t) + FindObjectsUsingNameAndFilteringForTxtType(t) + FindObjectsLargerThan64Mebibytes(t) + FindObjectsSmallerThan64Mebibytes(t) + FindObjectsOlderThan1d(t) + FindObjectsNewerThen1d(t) + GetObjectsAndCompareMD5(t) +} + +type TestUser struct { + Username string + Password string +} + +var ( + oneMBSlice [1048576]byte // 1x Mebibyte + defaultAlias = "mintest" + fileMap = make(map[string]*testFile) + randomLargeString = "lksdjfljsdklfjklsdjfklksjdf;lsjdk;fjks;djflsdlfkjskldjfklkljsdfljsldkfjklsjdfkljsdklfjklsdjflksjdlfjsdjflsjdflsldfjlsjdflksjdflkjslkdjflksfdj" + jsonFlag = "--json" + insecureFlag = "--insecure" + jsonOutput = true + printRawOut = false + skipBuild = false + mcCmd = ".././mc" + preCmdParameters = make([]string, 0) + buildPath = "../." + metaPrefix = "X-Amz-Meta-" + includeDeprecatedMethods = false + + serverEndpoint = "127.0.0.1:9000" + acessKey = "minioadmin" + secretKey = "minioadmin" + protocol = "http://" + skipInsecure = true + tempDir = "" + mainTestBucket string + sseTestBucket string + bucketList = make([]string, 0) + userList = make(map[string]TestUser, 0) + + // KMS + sseBaseEncodedKey = "MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDA" + invalidSSEBaseEncodedKey = "MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5" + sseBaseEncodedKey2 = "MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5YWE" + sseKMSKeyName = "" + sseInvalidKmsKeyName = "" + sseS3Enabled = false + + curlPath = "/usb/bin/curl" + HTTPClient *http.Client + failIndicator = "!! FAIL !! _______________________ !! FAIL !! _______________________ !! FAIL !!" 
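+
+	// The SSE-C test keys above are 32-byte secrets encoded as unpadded base64:
+	// sseBaseEncodedKey decodes to "01234567890123456789012345678900",
+	// sseBaseEncodedKey2 to "012345678901234567890123456789aa", while
+	// invalidSSEBaseEncodedKey decodes to only 30 bytes and is expected to be
+	// rejected by the --enc-c flag.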
+) + +func openFileAndGetMd5Sum(path string) (md5s string, err error) { + f, err := os.Open(path) + if err != nil { + return "", err + } + defer f.Close() + fb, err := io.ReadAll(f) + if err != nil { + return "", err + } + md5s = GetMD5Sum(fb) + return +} + +func GetMBSizeInBytes(MB int) int64 { + return int64(MB * len(oneMBSlice)) +} + +func initializeTestSuite(t *testing.T) { + shouldSkipBuild := os.Getenv("MC_TEST_SKIP_BUILD") + skipBuild, _ = strconv.ParseBool(shouldSkipBuild) + fmt.Println("SKIP BUILD:", skipBuild) + if !skipBuild { + err := BuildCLI() + if err != nil { + os.Exit(1) + } + } + envBuildPath := os.Getenv("MC_TEST_BUILD_PATH") + if envBuildPath != "" { + buildPath = envBuildPath + } + + envALIAS := os.Getenv("MC_TEST_ALIAS") + if envALIAS != "" { + defaultAlias = envALIAS + } + + envSecretKey := os.Getenv("MC_TEST_SECRET_KEY") + if envSecretKey != "" { + secretKey = envSecretKey + } + + envAccessKey := os.Getenv("MC_TEST_ACCESS_KEY") + if envAccessKey != "" { + acessKey = envAccessKey + } + + envServerEndpoint := os.Getenv("MC_TEST_SERVER_ENDPOINT") + if envServerEndpoint != "" { + serverEndpoint = envServerEndpoint + } + + envIncludeDeprecated := os.Getenv("MC_TEST_INCLUDE_DEPRECATED") + includeDeprecatedMethods, _ = strconv.ParseBool(envIncludeDeprecated) + + envKmsKey := os.Getenv("MC_TEST_KMS_KEY") + if envKmsKey != "" { + sseKMSKeyName = envKmsKey + } + + envSSES3Enabled := os.Getenv("MC_TEST_ENABLE_SSE_S3") + if envSSES3Enabled != "" { + sseS3Enabled, _ = strconv.ParseBool(envSSES3Enabled) + } + + envSkipInsecure := os.Getenv("MC_TEST_SKIP_INSECURE") + if envSkipInsecure != "" { + skipInsecure, _ = strconv.ParseBool(envSkipInsecure) + } + + envEnableHTTP := os.Getenv("MC_TEST_ENABLE_HTTPS") + EnableHTTPS, _ := strconv.ParseBool(envEnableHTTP) + if EnableHTTPS { + protocol = "https://" + } + + envCMD := os.Getenv("MC_TEST_BINARY_PATH") + if envCMD != "" { + mcCmd = envCMD + } + + var err error + tempDir, err = os.MkdirTemp("", "test-") + if err != nil { + log.Println(err) + os.Exit(1) + } + + for i := 0; i < len(oneMBSlice); i++ { + oneMBSlice[i] = byte(rand.Intn(250)) + } + + for i := 0; i < 10; i++ { + tmpNameMap["aaa"+strconv.Itoa(i)] = false + } + for i := 0; i < 10; i++ { + tmpNameMap["bbb"+strconv.Itoa(i)] = false + } + for i := 0; i < 10; i++ { + tmpNameMap["ccc"+strconv.Itoa(i)] = false + } + for i := 0; i < 10; i++ { + tmpNameMap["ddd"+strconv.Itoa(i)] = false + } + + HTTPClient = &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: skipInsecure}, + }, + } + + if jsonOutput { + preCmdParameters = append(preCmdParameters, jsonFlag) + } + + if skipInsecure { + preCmdParameters = append(preCmdParameters, insecureFlag) + } + + CreateTestUsers() + + _, err = RunMC( + "alias", + "set", + defaultAlias, + protocol+serverEndpoint, + acessKey, + secretKey, + ) + fatalIfError(err, t) + + out, err := RunMC("--version") + fatalIfError(err, t) + fmt.Println(out) + + preRunCleanup() + + mainTestBucket = CreateBucket(t) + sseTestBucket = CreateBucket(t) +} + +func preflightCheck(t *testing.T) { + out, err := exec.Command("which", "curl").Output() + fatalIfError(err, t) + if len(out) == 0 { + fatalMsgOnly("No curl found, output from 'which curl': "+string(out), t) + } + curlPath = string(out) +} + +func CreateTestUsers() { + userList["user1"] = TestUser{ + Username: "user1", + Password: "user1-password", + } + userList["user2"] = TestUser{ + Username: "user2", + Password: "user2-password", + } + userList["user3"] = TestUser{ + 
Username: "user3", + Password: "user3-password", + } +} + +func CreateFileBundle() { + createFile(newTestFile{ + tag: "0M", + prefix: "", + extension: ".jpg", + storageClass: "", + sizeInMBS: 0, + tags: map[string]string{"name": "0M"}, + // uploadShouldFail: false, + addToGlobalFileMap: true, + }) + createFile(newTestFile{ + tag: "1M", + prefix: "", + extension: ".txt", + storageClass: "REDUCED_REDUNDANCY", + sizeInMBS: 1, + metaData: map[string]string{"name": "1M"}, + tags: map[string]string{"tag1": "1M-tag"}, + // uploadShouldFail: false, + addToGlobalFileMap: true, + }) + createFile(newTestFile{ + tag: "2M", + prefix: "LVL1", + extension: ".jpg", + storageClass: "REDUCED_REDUNDANCY", + sizeInMBS: 2, + metaData: map[string]string{"name": "2M"}, + // uploadShouldFail: false, + addToGlobalFileMap: true, + }) + createFile(newTestFile{ + tag: "3M", + prefix: "LVL1/LVL2", + extension: ".png", + storageClass: "", + sizeInMBS: 3, + metaData: map[string]string{"name": "3M"}, + // uploadShouldFail: false, + addToGlobalFileMap: true, + }) + createFile(newTestFile{ + tag: "65M", + prefix: "LVL1/LVL2/LVL3", + extension: ".exe", + storageClass: "", + sizeInMBS: 65, + metaData: map[string]string{"name": "65M", "tag1": "value1"}, + // uploadShouldFail: false, + addToGlobalFileMap: true, + }) +} + +var tmpNameMap = make(map[string]bool) + +func GetRandomName() string { + for i := range tmpNameMap { + if tmpNameMap[i] == false { + tmpNameMap[i] = true + return i + } + } + return uuid.NewString() +} + +func CreateBucket(t *testing.T) (bucketPath string) { + bucketName := "test-" + GetRandomName() + bucketPath = defaultAlias + "/" + bucketName + out, err := RunMC("mb", bucketPath) + if err != nil { + t.Fatalf("Unable to create bucket (%s) err: %s", bucketPath, out) + return + } + bucketList = append(bucketList, bucketPath) + out, err = RunMC("stat", defaultAlias+"/"+bucketName) + if err != nil { + t.Fatalf("Unable to ls stat (%s) err: %s", defaultAlias+"/"+bucketName, out) + return + } + if !strings.Contains(out, bucketName) { + t.Fatalf("stat output does not contain bucket name (%s)", bucketName) + } + return +} + +func AddALIASWithError(t *testing.T) { + out, err := RunMC( + "alias", + "set", + defaultAlias, + protocol+serverEndpoint, + acessKey, + "random-invalid-secret-that-will-not-work", + ) + fatalIfNoErrorWMsg(err, out, t) +} + +func AdminUserFunctionalTest(t *testing.T) { + user1Bucket := CreateBucket(t) + + user1File := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "user1", + sizeInMBS: 1, + }) + + out, err := RunMC( + "admin", + "user", + "add", + defaultAlias, + userList["user1"].Username, + userList["user1"].Password, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC( + "admin", + "user", + "list", + defaultAlias, + ) + fatalIfErrorWMsg(err, out, t) + userOutput, err := parseUserMessageListOutput(out) + fatalIfErrorWMsg(err, out, t) + + user1found := false + for i := range userOutput { + if userOutput[i].AccessKey == userList["user1"].Username { + user1found = true + } + } + + if !user1found { + fatalMsgOnly(fmt.Sprintf("did not find user %s when running admin user list --json", userList["user1"].Username), t) + } + + out, err = RunMC( + "admin", + "policy", + "attach", + defaultAlias, + "readwrite", + "--user="+userList["user1"].Username, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC( + "alias", + "set", + userList["user1"].Username, + protocol+serverEndpoint, + userList["user1"].Username, + userList["user1"].Password, + ) + fatalIfErrorWMsg(err, out, t) + + 
out, err = RunMC( + "cp", + user1File.diskFile.Name(), + user1Bucket+"/"+user1File.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) +} + +func ShareURLUploadErrorTests(t *testing.T) { + shareURLErrorBucket := CreateBucket(t) + + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "presigned-error", + sizeInMBS: 1, + }) + + out, err := RunMC( + "share", + "upload", + shareURLErrorBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + shareMsg, err := parseShareMessageFromJSONOutput(out) + fatalIfErrorWMsg(err, out, t) + + finalURL := strings.Replace(shareMsg.ShareURL, "", file.diskFile.Name(), -1) + splitCommand := strings.Split(finalURL, " ") + + if skipInsecure { + splitCommand = append(splitCommand, "--insecure") + } + + bucketOnly := strings.Replace(shareURLErrorBucket, defaultAlias+"/", "", -1) + + // Modify base url bucket path + newCmd := make([]string, len(splitCommand)) + copy(newCmd, splitCommand) + newCmd[1] = strings.Replace(newCmd[1], bucketOnly, "fake-bucket-name", -1) + out, _ = RunCommand(newCmd[0], newCmd[1:]...) + curlFatalIfNoErrorTag(out, t) + + // Modify -F key=X + newCmd = make([]string, len(splitCommand)) + copy(newCmd, splitCommand) + for i := range newCmd { + if strings.HasPrefix(newCmd[i], "key=") { + newCmd[i] = "key=fake-object-name" + break + } + } + out, _ = RunCommand(newCmd[0], newCmd[1:]...) + curlFatalIfNoErrorTag(out, t) +} + +func ShareURLUploadTest(t *testing.T) { + ShareURLTestBucket := CreateBucket(t) + + file := createFile(newTestFile{ + addToGlobalFileMap: true, + tag: "presigned-upload", + sizeInMBS: 1, + }) + + out, err := RunMC( + "share", + "upload", + ShareURLTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + shareMsg, err := parseShareMessageFromJSONOutput(out) + fatalIfErrorWMsg(err, out, t) + + finalURL := strings.Replace(shareMsg.ShareURL, "", file.diskFile.Name(), -1) + splitCommand := strings.Split(finalURL, " ") + + if skipInsecure { + splitCommand = append(splitCommand, "--insecure") + } + + _, err = exec.Command(splitCommand[0], splitCommand[1:]...).CombinedOutput() + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC( + "stat", + ShareURLTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + statMsg, err := parseStatSingleObjectJSONOutput(out) + fatalIfError(err, t) + + if statMsg.ETag != file.md5Sum { + fatalMsgOnly(fmt.Sprintf("expecting md5sum (%s) but got md5sum (%s)", file.md5Sum, file.md5Sum), t) + } +} + +func ShareURLDownloadTest(t *testing.T) { + ShareURLTestBucket := CreateBucket(t) + file := createFile(newTestFile{ + addToGlobalFileMap: true, + tag: "presigned-download", + sizeInMBS: 1, + }) + + out, err := RunMC( + "cp", + file.diskFile.Name(), + ShareURLTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC( + "share", + "download", + ShareURLTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + shareMsg, err := parseShareMessageFromJSONOutput(out) + fatalIfErrorWMsg(err, out, t) + + resp, err := HTTPClient.Get(shareMsg.ShareURL) + fatalIfError(err, t) + + downloadedFile, err := io.ReadAll(resp.Body) + fatalIfError(err, t) + + md5sum := GetMD5Sum(downloadedFile) + if md5sum != file.md5Sum { + fatalMsgOnly( + fmt.Sprintf("expecting md5sum (%s) but got md5sum (%s)", file.md5Sum, md5sum), + t, + ) + } +} + +func PutObjectPreserveAttributes(t *testing.T) { + AttrTestBucket := CreateBucket(t) + file := fileMap["1M"] + out, err := RunMC( + "cp", + "-a", + 
file.diskFile.Name(), + AttrTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC( + "stat", + AttrTestBucket+"/"+file.fileNameWithPrefix, + ) + fatalIfError(err, t) + + stats, err := parseStatSingleObjectJSONOutput(out) + fatalIfError(err, t) + + attr, err := disk.GetFileSystemAttrs(file.diskFile.Name()) + fatalIfError(err, t) + if attr != stats.Metadata["X-Amz-Meta-Mc-Attrs"] { + fatalMsgOnly(fmt.Sprintf("expecting file attributes (%s) but got file attributes (%s)", attr, stats.Metadata["X-Amz-Meta-Mc-Attrs"]), t) + } +} + +func MirrorTempDirectoryStorageClassReducedRedundancy(t *testing.T) { + MirrorBucket := CreateBucket(t) + out, err := RunMC( + "mirror", + "--storage-class", "REDUCED_REDUNDANCY", + tempDir, + MirrorBucket, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC("ls", "-r", MirrorBucket) + fatalIfError(err, t) + + fileList, err := parseLSJSONOutput(out) + fatalIfError(err, t) + + for i, f := range fileMap { + fileFound := false + + for _, o := range fileList { + if o.Key == f.fileNameWithoutPath { + fileMap[i].MinioLS = o + fileFound = true + } + } + + if !fileFound { + t.Fatalf("File was not uploaded: %s", f.fileNameWithPrefix) + } + } +} + +func MirrorTempDirectory(t *testing.T) { + MirrorBucket := CreateBucket(t) + + out, err := RunMC( + "mirror", + tempDir, + MirrorBucket, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC("ls", "-r", MirrorBucket) + fatalIfError(err, t) + + fileList, err := parseLSJSONOutput(out) + fatalIfError(err, t) + + for i, f := range fileMap { + fileFound := false + + for _, o := range fileList { + if o.Key == f.fileNameWithoutPath { + fileMap[i].MinioLS = o + fileFound = true + } + } + + if !fileFound { + t.Fatalf("File was not uploaded: %s", f.fileNameWithPrefix) + } + } +} + +func CatObjectFromStdin(t *testing.T) { + objectName := "pipe-test-object" + CatEchoBucket := CreateBucket(t) + + file := fileMap["1M"] + + cmdCAT := exec.Command( + "cat", + file.diskFile.Name(), + ) + + p := []string{ + "pipe", + CatEchoBucket + "/" + objectName, + } + if skipInsecure { + p = append(p, "--insecure") + } + + cmdMC := exec.Command(mcCmd, p...) 
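+	// Wire the two commands together with an io.Pipe below: `cat <file>` writes
+	// into the pipe and `mc pipe <bucket>/<object>` reads from it, the
+	// in-process equivalent of `cat file | mc pipe ALIAS/bucket/pipe-test-object`.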
+ + r, w := io.Pipe() + defer r.Close() + defer w.Close() + + cmdCAT.Stdout = w + cmdMC.Stdin = r + + err := cmdMC.Start() + fatalIfError(err, t) + err = cmdCAT.Start() + fatalIfError(err, t) + + err = cmdCAT.Wait() + fatalIfError(err, t) + w.Close() + err = cmdMC.Wait() + fatalIfError(err, t) + r.Close() + + outB, err := RunMC( + "cat", + CatEchoBucket+"/"+objectName, + ) + fatalIfErrorWMsg(err, outB, t) + + md5SumCat := GetMD5Sum([]byte(outB)) + if file.md5Sum != md5SumCat { + fatalMsgOnly( + fmt.Sprintf("expecting md5sum (%s) but got md5sum (%s)", file.md5Sum, md5SumCat), + t, + ) + } +} + +func CatObjectToStdIn(t *testing.T) { + file := fileMap["1M"] + out, err := RunMC( + "cat", + mainTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + md5Sum := GetMD5Sum([]byte(out)) + if md5Sum != file.md5Sum { + fatalMsgOnly( + fmt.Sprintf("expecting md5sum (%s) but got md5sum (%s)", file.md5Sum, md5Sum), + t, + ) + } +} + +func VerifyKMSKey(t *testing.T) { + out, err := RunMC( + "admin", + "kms", + "key", + "list", + defaultAlias, + ) + fatalIfError(err, t) + keyMsg := new(kmsKeysMsg) + err = json.Unmarshal([]byte(out), keyMsg) + fatalIfError(err, t) + sseInvalidKmsKeyName = uuid.NewString() + found := false + invalidKeyFound := false + for _, v := range keyMsg.Keys { + if v == sseKMSKeyName { + found = true + break + } + if v == sseInvalidKmsKeyName { + invalidKeyFound = true + } + } + if !found { + fatalMsgOnly(fmt.Sprintf("expected to find kms key %s but got these keys: %v", sseKMSKeyName, keyMsg.Keys), t) + } + if invalidKeyFound { + fatalMsgOnly("tried to create invalid uuid kms key but for some reason it overlapped with an already existing key", t) + } +} + +func PutObjectWithSSEKMS(t *testing.T) { + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "encput-kms", + sizeInMBS: 1, + }) + + out, err := RunMC( + "cp", + "--enc-kms", + sseTestBucket+"="+sseKMSKeyName, + file.diskFile.Name(), + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) +} + +func PutObjectWithSSEKMSMultipart(t *testing.T) { + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "encmultiput-kms", + sizeInMBS: 68, + }) + + out, err := RunMC( + "cp", + "--enc-kms", + sseTestBucket+"="+sseKMSKeyName, + file.diskFile.Name(), + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) +} + +func PutObjectWithSSEKMSInvalidKeys(t *testing.T) { + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "encerror-kms", + sizeInMBS: 1, + }) + + out, err := RunMC( + "cp", + "--enc-kms="+sseTestBucket+"="+sseInvalidKmsKeyName, + file.diskFile.Name(), + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfNoErrorWMsg(err, out, t) +} + +func GetObjectWithSSES3(t *testing.T) { + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "encget-s3", + sizeInMBS: 1, + }) + + out, err := RunMC( + "cp", + "--enc-s3="+sseTestBucket, + file.diskFile.Name(), + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC( + "cp", + sseTestBucket+"/"+file.fileNameWithoutPath, + file.diskFile.Name()+".download", + ) + fatalIfErrorWMsg(err, out, t) + + md5s, err := openFileAndGetMd5Sum(file.diskFile.Name() + ".download") + fatalIfError(err, t) + if md5s != file.md5Sum { + fatalMsgOnly(fmt.Sprintf("expecting md5sum (%s) but got sum (%s)", file.md5Sum, md5s), t) + } +} + +func CatObjectWithSSES3(t *testing.T) { + file := createFile(newTestFile{ + addToGlobalFileMap: false, + 
tag: "enccat-s3", + sizeInMBS: 1, + }) + + out, err := RunMC( + "cp", + "--enc-s3="+sseTestBucket, + file.diskFile.Name(), + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC( + "cat", + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + catMD5Sum := GetMD5Sum([]byte(out)) + + if catMD5Sum != file.md5Sum { + fatalMsgOnly(fmt.Sprintf( + "expected md5sum %s but we got %s", + file.md5Sum, + catMD5Sum, + ), t) + } + + if int64(len(out)) != file.diskStat.Size() { + fatalMsgOnly(fmt.Sprintf( + "file size is %d but we got %d", + file.diskStat.Size(), + len(out), + ), t) + } + + fatalIfErrorWMsg( + err, + "cat length: "+strconv.Itoa(len(out))+" -- file length:"+strconv.Itoa(int(file.diskStat.Size())), + t, + ) +} + +func CopyObjectWithSSES3ToNewBucket(t *testing.T) { + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "encbucketcopy-s3", + sizeInMBS: 1, + }) + + out, err := RunMC( + "cp", + "--enc-s3="+sseTestBucket, + file.diskFile.Name(), + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + TargetSSEBucket := CreateBucket(t) + + out, err = RunMC( + "cp", + "--enc-s3="+TargetSSEBucket, + sseTestBucket+"/"+file.fileNameWithoutPath, + TargetSSEBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC( + "cp", + TargetSSEBucket+"/"+file.fileNameWithoutPath, + file.diskFile.Name()+".download", + ) + fatalIfErrorWMsg(err, out, t) + + md5s, err := openFileAndGetMd5Sum(file.diskFile.Name() + ".download") + fatalIfError(err, t) + if md5s != file.md5Sum { + fatalMsgOnly(fmt.Sprintf("expecting md5sum (%s) but got sum (%s)", file.md5Sum, md5s), t) + } +} + +func MirrorTempDirectoryUsingSSES3(t *testing.T) { + MirrorBucket := CreateBucket(t) + + subDir := "encmirror-s3" + + f1 := createFile(newTestFile{ + addToGlobalFileMap: false, + subDir: subDir, + tag: "encmirror1-s3", + sizeInMBS: 1, + }) + + f2 := createFile(newTestFile{ + addToGlobalFileMap: false, + subDir: subDir, + tag: "encmirror2-s3", + sizeInMBS: 2, + }) + + f3 := createFile(newTestFile{ + addToGlobalFileMap: false, + subDir: subDir, + tag: "encmirror3-s3", + sizeInMBS: 4, + }) + + files := append([]*testFile{}, f1, f2, f3) + + out, err := RunMC( + "mirror", + "--enc-s3="+MirrorBucket, + tempDir+string(os.PathSeparator)+subDir, + MirrorBucket, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC("ls", "-r", MirrorBucket) + fatalIfError(err, t) + + fileList, err := parseLSJSONOutput(out) + fatalIfError(err, t) + + for i, f := range files { + fileFound := false + + for _, o := range fileList { + if o.Key == f.fileNameWithoutPath { + files[i].MinioLS = o + fileFound = true + } + } + + if !fileFound { + fatalMsgOnly(fmt.Sprintf( + "File was not uploaded: %s", + f.fileNameWithPrefix, + ), t) + } + + out, err := RunMC("stat", MirrorBucket+"/"+files[i].MinioLS.Key) + fatalIfError(err, t) + stat, err := parseStatSingleObjectJSONOutput(out) + fatalIfError(err, t) + files[i].MinioStat = stat + + foundKmsTag := false + for ii := range stat.Metadata { + if ii == amzObjectSSE { + foundKmsTag = true + break + } + } + + if !foundKmsTag { + fmt.Println(stat) + fatalMsgOnly(amzObjectSSEKMSKeyID+" not found for object "+files[i].MinioLS.Key, t) + } + + } +} + +func PutObjectWithSSES3(t *testing.T) { + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "encput-s3", + sizeInMBS: 1, + }) + + out, err := RunMC( + "cp", + "--enc-s3="+sseTestBucket, + file.diskFile.Name(), + 
sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) +} + +func PutObjectWithSSES3Multipart(t *testing.T) { + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "encmultiput-s3", + sizeInMBS: 68, + }) + + out, err := RunMC( + "cp", + "--enc-s3="+sseTestBucket, + file.diskFile.Name(), + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) +} + +func GetObjectWithSSEKMS(t *testing.T) { + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "encget-kms", + sizeInMBS: 1, + }) + + out, err := RunMC( + "cp", + "--enc-kms="+sseTestBucket+"="+sseKMSKeyName, + file.diskFile.Name(), + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC( + "cp", + sseTestBucket+"/"+file.fileNameWithoutPath, + file.diskFile.Name()+".download", + ) + fatalIfErrorWMsg(err, out, t) + + md5s, err := openFileAndGetMd5Sum(file.diskFile.Name() + ".download") + fatalIfError(err, t) + if md5s != file.md5Sum { + fatalMsgOnly(fmt.Sprintf("expecting md5sum (%s) but got sum (%s)", file.md5Sum, md5s), t) + } +} + +func PutObjectWithSSECMultipart(t *testing.T) { + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "encmultiput", + sizeInMBS: 68, + }) + + out, err := RunMC( + "cp", + "--enc-c="+sseTestBucket+"="+sseBaseEncodedKey, + file.diskFile.Name(), + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) +} + +func CatObjectWithSSEKMS(t *testing.T) { + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "enccat-kms", + sizeInMBS: 1, + }) + + out, err := RunMC( + "cp", + "--enc-kms="+sseTestBucket+"="+sseKMSKeyName, + file.diskFile.Name(), + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC( + "cat", + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + catMD5Sum := GetMD5Sum([]byte(out)) + + if catMD5Sum != file.md5Sum { + fatalMsgOnly(fmt.Sprintf( + "expected md5sum %s but we got %s", + file.md5Sum, + catMD5Sum, + ), t) + } + + if int64(len(out)) != file.diskStat.Size() { + fatalMsgOnly(fmt.Sprintf( + "file size is %d but we got %d", + file.diskStat.Size(), + len(out), + ), t) + } + + fatalIfErrorWMsg( + err, + "cat length: "+strconv.Itoa(len(out))+" -- file length:"+strconv.Itoa(int(file.diskStat.Size())), + t, + ) +} + +func PutObjectWithSSEC(t *testing.T) { + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "encput", + sizeInMBS: 1, + }) + + out, err := RunMC( + "cp", + "--enc-c="+sseTestBucket+"="+sseBaseEncodedKey, + file.diskFile.Name(), + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) +} + +func PutObjectErrorWithSSECOverHTTP(t *testing.T) { + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "encput-http", + sizeInMBS: 1, + }) + + out, err := RunMC( + "cp", + "--enc-c="+sseTestBucket+"="+sseBaseEncodedKey, + file.diskFile.Name(), + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfNoErrorWMsg(err, out, t) +} + +func PutObjectWithSSECInvalidKeys(t *testing.T) { + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "encerror-dep", + sizeInMBS: 1, + }) + + out, err := RunMC( + "cp", + "--enc-c="+sseTestBucket+"="+invalidSSEBaseEncodedKey, + file.diskFile.Name(), + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfNoErrorWMsg(err, out, t) +} + +func GetObjectWithSSEC(t *testing.T) { + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: 
"encget", + sizeInMBS: 1, + }) + + out, err := RunMC( + "cp", + "--enc-c="+sseTestBucket+"="+sseBaseEncodedKey, + file.diskFile.Name(), + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC( + "cp", + "--enc-c="+sseTestBucket+"="+sseBaseEncodedKey, + sseTestBucket+"/"+file.fileNameWithoutPath, + file.diskFile.Name()+".download", + ) + fatalIfErrorWMsg(err, out, t) + + md5s, err := openFileAndGetMd5Sum(file.diskFile.Name() + ".download") + fatalIfError(err, t) + if md5s != file.md5Sum { + fatalMsgOnly(fmt.Sprintf("expecting md5sum (%s) but got sum (%s)", file.md5Sum, md5s), t) + } +} + +func GetObjectWithSSECWithoutKey(t *testing.T) { + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "encerror", + sizeInMBS: 1, + }) + + out, err := RunMC( + "cp", + "--enc-c="+sseTestBucket+"="+sseBaseEncodedKey, + file.diskFile.Name(), + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC( + "cp", + sseTestBucket+"/"+file.fileNameWithoutPath, + file.diskFile.Name()+"-get", + ) + fatalIfNoErrorWMsg(err, out, t) +} + +func CatObjectWithSSEC(t *testing.T) { + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "enccat", + sizeInMBS: 1, + }) + + out, err := RunMC( + "cp", + "--enc-c="+sseTestBucket+"="+sseBaseEncodedKey, + file.diskFile.Name(), + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC( + "cat", + "--enc-c="+sseTestBucket+"="+sseBaseEncodedKey, + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + catMD5Sum := GetMD5Sum([]byte(out)) + + if catMD5Sum != file.md5Sum { + fatalMsgOnly(fmt.Sprintf( + "expected md5sum %s but we got %s", + file.md5Sum, + catMD5Sum, + ), t) + } + + if int64(len(out)) != file.diskStat.Size() { + fatalMsgOnly(fmt.Sprintf( + "file size is %d but we got %d", + file.diskStat.Size(), + len(out), + ), t) + } + + fatalIfErrorWMsg( + err, + "cat length: "+strconv.Itoa(len(out))+" -- file length:"+strconv.Itoa(int(file.diskStat.Size())), + t, + ) +} + +func CopyObjectWithSSEKMSWithOverLappingKeys(t *testing.T) { + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "encbucketcopy-kms", + sizeInMBS: 1, + }) + + out, err := RunMC( + "cp", + "--enc-kms="+sseTestBucket+"="+sseKMSKeyName, + file.diskFile.Name(), + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + TargetSSEBucket := CreateBucket(t) + + out, err = RunMC( + "cp", + "--enc-kms="+TargetSSEBucket+"="+sseKMSKeyName, + "--enc-kms="+TargetSSEBucket+"="+sseKMSKeyName, + sseTestBucket+"/"+file.fileNameWithoutPath, + TargetSSEBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfNoErrorWMsg(err, out, t) +} + +func CopyObjectWithSSEKMSToNewBucket(t *testing.T) { + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "encbucketcopy-kms", + sizeInMBS: 1, + }) + + out, err := RunMC( + "cp", + "--enc-kms="+sseTestBucket+"="+sseKMSKeyName, + file.diskFile.Name(), + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + TargetSSEBucket := CreateBucket(t) + + out, err = RunMC( + "cp", + "--enc-kms="+TargetSSEBucket+"="+sseKMSKeyName, + "--enc-kms="+sseTestBucket+"="+sseKMSKeyName, + sseTestBucket+"/"+file.fileNameWithoutPath, + TargetSSEBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC( + "cp", + "--enc-kms="+TargetSSEBucket+"="+sseKMSKeyName, + TargetSSEBucket+"/"+file.fileNameWithoutPath, + 
file.diskFile.Name()+".download", + ) + fatalIfErrorWMsg(err, out, t) + + md5s, err := openFileAndGetMd5Sum(file.diskFile.Name() + ".download") + fatalIfError(err, t) + if md5s != file.md5Sum { + fatalMsgOnly(fmt.Sprintf("expecting md5sum (%s) but got sum (%s)", file.md5Sum, md5s), t) + } +} + +func CopyObjectWithSSEKMSToNewBucketWithSSEC(t *testing.T) { + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "encbucketcopy-kms-c", + sizeInMBS: 1, + }) + + out, err := RunMC( + "cp", + "--enc-kms="+sseTestBucket+"="+sseKMSKeyName, + file.diskFile.Name(), + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + TargetSSEBucket := CreateBucket(t) + + out, err = RunMC( + "cp", + "--enc-c="+TargetSSEBucket+"="+sseBaseEncodedKey, + sseTestBucket+"/"+file.fileNameWithoutPath, + TargetSSEBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC( + "cp", + "--enc-c="+TargetSSEBucket+"="+sseBaseEncodedKey, + TargetSSEBucket+"/"+file.fileNameWithoutPath, + file.diskFile.Name()+".download", + ) + fatalIfErrorWMsg(err, out, t) + + md5s, err := openFileAndGetMd5Sum(file.diskFile.Name() + ".download") + fatalIfError(err, t) + if md5s != file.md5Sum { + fatalMsgOnly(fmt.Sprintf("expecting md5sum (%s) but got sum (%s)", file.md5Sum, md5s), t) + } +} + +func MirrorTempDirectoryUsingSSEKMS(t *testing.T) { + MirrorBucket := CreateBucket(t) + + subDir := "encmirror-kms" + + f1 := createFile(newTestFile{ + addToGlobalFileMap: false, + subDir: subDir, + tag: "encmirror1-kms", + sizeInMBS: 1, + }) + + f2 := createFile(newTestFile{ + addToGlobalFileMap: false, + subDir: subDir, + tag: "encmirror2-kms", + sizeInMBS: 2, + }) + + f3 := createFile(newTestFile{ + addToGlobalFileMap: false, + subDir: subDir, + tag: "encmirror3-kms", + sizeInMBS: 4, + }) + + files := append([]*testFile{}, f1, f2, f3) + + out, err := RunMC( + "mirror", + "--enc-kms="+MirrorBucket+"="+sseKMSKeyName, + tempDir+string(os.PathSeparator)+subDir, + MirrorBucket, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC("ls", "-r", MirrorBucket) + fatalIfError(err, t) + + fileList, err := parseLSJSONOutput(out) + fatalIfError(err, t) + + for i, f := range files { + fileFound := false + + for _, o := range fileList { + if o.Key == f.fileNameWithoutPath { + files[i].MinioLS = o + fileFound = true + } + } + + if !fileFound { + fatalMsgOnly(fmt.Sprintf( + "File was not uploaded: %s", + f.fileNameWithPrefix, + ), t) + } + + out, err := RunMC("stat", MirrorBucket+"/"+files[i].MinioLS.Key) + fatalIfError(err, t) + stat, err := parseStatSingleObjectJSONOutput(out) + fatalIfError(err, t) + files[i].MinioStat = stat + + foundKmsTag := false + for ii, v := range stat.Metadata { + if ii == amzObjectSSEKMSKeyID { + foundKmsTag = true + if !strings.HasSuffix(v, sseKMSKeyName) { + fatalMsgOnly("invalid KMS key for object "+files[i].MinioLS.Key, t) + break + } + } + } + + if !foundKmsTag { + fatalMsgOnly(amzObjectSSEKMSKeyID+" not found for object "+files[i].MinioLS.Key, t) + } + + } +} + +func RemoveObjectWithSSEKMS(t *testing.T) { + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "encrm-kms", + sizeInMBS: 1, + }) + + out, err := RunMC( + "cp", + "--enc-kms="+sseTestBucket+"="+sseKMSKeyName, + file.diskFile.Name(), + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC( + "rm", + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC( + "stat", + 
sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfNoErrorWMsg(err, out, t) +} + +func CatObjectWithSSECWithoutKey(t *testing.T) { + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "encerror", + sizeInMBS: 1, + }) + + out, err := RunMC( + "cp", + "--enc-c="+sseTestBucket+"="+sseBaseEncodedKey, + file.diskFile.Name(), + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC( + "cat", + sseTestBucket+"/"+file.fileNameWithoutPath, + file.diskFile.Name()+"-cat", + ) + fatalIfNoErrorWMsg(err, out, t) +} + +func RemoveObjectWithSSEC(t *testing.T) { + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "encrm", + sizeInMBS: 1, + }) + + out, err := RunMC( + "cp", + "--enc-c="+sseTestBucket+"="+sseBaseEncodedKey, + file.diskFile.Name(), + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC( + "rm", + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC( + "stat", + "--enc-c="+sseTestBucket+"="+sseBaseEncodedKey, + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfNoErrorWMsg(err, out, t) +} + +func MirrorTempDirectoryUsingSSEC(t *testing.T) { + MirrorBucket := CreateBucket(t) + + subDir := "encmirror" + + f1 := createFile(newTestFile{ + addToGlobalFileMap: false, + subDir: subDir, + tag: "encmirror1", + sizeInMBS: 1, + }) + + f2 := createFile(newTestFile{ + addToGlobalFileMap: false, + subDir: subDir, + tag: "encmirror2", + sizeInMBS: 2, + }) + + f3 := createFile(newTestFile{ + addToGlobalFileMap: false, + subDir: subDir, + tag: "encmirror3", + sizeInMBS: 4, + }) + + files := append([]*testFile{}, f1, f2, f3) + + out, err := RunMC( + "mirror", + "--enc-c="+MirrorBucket+"="+sseBaseEncodedKey, + tempDir+string(os.PathSeparator)+subDir, + MirrorBucket, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC("ls", "-r", MirrorBucket) + fatalIfError(err, t) + + fileList, err := parseLSJSONOutput(out) + fatalIfError(err, t) + + for i, f := range files { + fileFound := false + + for _, o := range fileList { + if o.Key == f.fileNameWithoutPath { + files[i].MinioLS = o + fileFound = true + } + } + + if !fileFound { + fatalMsgOnly(fmt.Sprintf( + "File was not uploaded: %s", + f.fileNameWithPrefix, + ), t) + } + + out, err := RunMC( + "stat", + "--enc-c="+MirrorBucket+"="+sseBaseEncodedKey, + MirrorBucket+"/"+files[i].MinioLS.Key, + ) + fatalIfError(err, t) + _, err = parseStatSingleObjectJSONOutput(out) + fatalIfError(err, t) + + } +} + +func CopyObjectWithSSECToNewBucketWithNewKey(t *testing.T) { + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "encbucketcopy", + sizeInMBS: 1, + }) + + out, err := RunMC( + "cp", + "--enc-c="+sseTestBucket+"="+sseBaseEncodedKey, + file.diskFile.Name(), + sseTestBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + TargetSSEBucket := CreateBucket(t) + + out, err = RunMC( + "cp", + "--enc-c="+TargetSSEBucket+"="+sseBaseEncodedKey2, + "--enc-c="+sseTestBucket+"="+sseBaseEncodedKey, + sseTestBucket+"/"+file.fileNameWithoutPath, + TargetSSEBucket+"/"+file.fileNameWithoutPath, + ) + fatalIfErrorWMsg(err, out, t) + + out, err = RunMC( + "cp", + "--enc-c="+TargetSSEBucket+"="+sseBaseEncodedKey2, + TargetSSEBucket+"/"+file.fileNameWithoutPath, + file.diskFile.Name()+".download", + ) + fatalIfErrorWMsg(err, out, t) + + md5s, err := openFileAndGetMd5Sum(file.diskFile.Name() + ".download") + fatalIfError(err, t) + if md5s != file.md5Sum { + 
fatalMsgOnly(fmt.Sprintf("expecting md5sum (%s) but got sum (%s)", file.md5Sum, md5s), t) + } +} + +func uploadAllFiles(t *testing.T) { + for _, v := range fileMap { + parameters := make([]string, 0) + parameters = append(parameters, "cp") + + if v.storageClass != "" { + parameters = append(parameters, "--storage-class", v.storageClass) + } + + if len(v.metaData) > 0 { + parameters = append(parameters, "--attr") + meta := "" + for i, v := range v.metaData { + meta += i + "=" + v + ";" + } + meta = strings.TrimSuffix(meta, ";") + parameters = append(parameters, meta) + } + if len(v.tags) > 0 { + parameters = append(parameters, "--tags") + tags := "" + for i, v := range v.tags { + tags += i + "=" + v + ";" + } + tags = strings.TrimSuffix(tags, ";") + parameters = append(parameters, tags) + } + + parameters = append(parameters, v.diskFile.Name()) + + if v.prefix != "" { + parameters = append( + parameters, + mainTestBucket+"/"+v.fileNameWithPrefix, + ) + } else { + parameters = append( + parameters, + mainTestBucket+"/"+v.fileNameWithoutPath, + ) + } + + _, err := RunMC(parameters...) + if err != nil { + t.Fatal(err) + } + } +} + +func OD(t *testing.T) { + LocalBucketPath := CreateBucket(t) + + file := fileMap["65M"] + out, err := RunMC( + "od", + "if="+file.diskFile.Name(), + "of="+LocalBucketPath+"/od/"+file.fileNameWithoutPath, + "parts=10", + ) + + fatalIfError(err, t) + odMsg, err := parseSingleODMessageJSONOutput(out) + fatalIfError(err, t) + + if odMsg.TotalSize != file.diskStat.Size() { + t.Fatalf( + "Expected (%d) bytes to be uploaded but only uploaded (%d) bytes", + odMsg.TotalSize, + file.diskStat.Size(), + ) + } + + if odMsg.Parts != 10 { + t.Fatalf( + "Expected upload parts to be (10) but they were (%d)", + odMsg.Parts, + ) + } + + if odMsg.Type != "FStoS3" { + t.Fatalf( + "Expected type to be (FStoS3) but got (%s)", + odMsg.Type, + ) + } + + if odMsg.PartSize != uint64(file.diskStat.Size())/10 { + t.Fatalf( + "Expected part size to be (%d) but got (%d)", + file.diskStat.Size()/10, + odMsg.PartSize, + ) + } + + out, err = RunMC( + "od", + "of="+file.diskFile.Name(), + "if="+LocalBucketPath+"/od/"+file.fileNameWithoutPath, + "parts=10", + ) + + fatalIfError(err, t) + fmt.Println(out) + odMsg, err = parseSingleODMessageJSONOutput(out) + fatalIfError(err, t) + + if odMsg.TotalSize != file.diskStat.Size() { + t.Fatalf( + "Expected (%d) bytes to be uploaded but only uploaded (%d) bytes", + odMsg.TotalSize, + file.diskStat.Size(), + ) + } + + if odMsg.Parts != 10 { + t.Fatalf( + "Expected upload parts to be (10) but they were (%d)", + odMsg.Parts, + ) + } + + if odMsg.Type != "S3toFS" { + t.Fatalf( + "Expected type to be (FStoS3) but got (%s)", + odMsg.Type, + ) + } + + if odMsg.PartSize != uint64(file.diskStat.Size())/10 { + t.Fatalf( + "Expected part size to be (%d) but got (%d)", + file.diskStat.Size()/10, + odMsg.PartSize, + ) + } +} + +func MvFromDiskToMinio(t *testing.T) { + LocalBucketPath := CreateBucket(t) + + file := createFile(newTestFile{ + addToGlobalFileMap: false, + tag: "10Move", + prefix: "", + extension: ".txt", + storageClass: "", + sizeInMBS: 1, + metaData: map[string]string{"name": "10Move"}, + tags: map[string]string{"tag1": "10Move-tag"}, + }) + + out, err := RunMC( + "mv", + file.diskFile.Name(), + LocalBucketPath+"/"+file.fileNameWithoutPath, + ) + + fatalIfError(err, t) + splitReturn := bytes.Split([]byte(out), []byte{10}) + + mvMSG, err := parseSingleCPMessageJSONOutput(string(splitReturn[0])) + fatalIfError(err, t) + + if mvMSG.TotalCount != 1 { + 
t.Fatalf("Expected count to be 1 but got (%d)", mvMSG.TotalCount) + } + + if mvMSG.Size != file.diskStat.Size() { + t.Fatalf( + "Expected size to be (%d) but got (%d)", + file.diskStat.Size(), + mvMSG.Size, + ) + } + + if mvMSG.Status != "success" { + t.Fatalf( + "Expected status to be (success) but got (%s)", + mvMSG.Status, + ) + } + + statMSG, err := parseSingleAccountStatJSONOutput(string(splitReturn[1])) + fatalIfError(err, t) + + if statMSG.Transferred != file.diskStat.Size() { + t.Fatalf( + "Expected transfeered to be (%d) but got (%d)", + file.diskStat.Size(), + statMSG.Transferred, + ) + } + + if statMSG.Total != file.diskStat.Size() { + t.Fatalf( + "Expected total to be (%d) but got (%d)", + file.diskStat.Size(), + statMSG.Total, + ) + } + + if statMSG.Status != "success" { + t.Fatalf( + "Expected status to be (success) but got (%s)", + statMSG.Status, + ) + } +} + +func DUBucket(t *testing.T) { + var totalFileSize int64 + for _, v := range fileMap { + totalFileSize += v.MinioStat.Size + } + + out, err := RunMC("du", mainTestBucket) + fatalIfError(err, t) + + duList, err := parseDUJSONOutput(out) + fatalIfError(err, t) + if len(duList) != 1 { + fatalMsgOnly("Expected 1 result to be returned", t) + } + if duList[0].Size != totalFileSize { + fatalMsgOnly( + fmt.Sprintf("total size to be %d but got %d", totalFileSize, duList[0].Size), + t, + ) + } +} + +func LSObjects(t *testing.T) { + out, err := RunMC("ls", "-r", mainTestBucket) + fatalIfError(err, t) + + fileList, err := parseLSJSONOutput(out) + fatalIfError(err, t) + + for i, f := range fileMap { + fileFound := false + + for _, o := range fileList { + if o.Key == f.fileNameWithPrefix { + fileMap[i].MinioLS = o + fileFound = true + } + } + + if !fileFound { + t.Fatalf("File was not uploaded: %s", f.fileNameWithPrefix) + } + } +} + +func StatObjects(t *testing.T) { + for i, v := range fileMap { + + out, err := RunMC( + "stat", + mainTestBucket+"/"+v.fileNameWithPrefix, + ) + fatalIfError(err, t) + + fileMap[i].MinioStat, err = parseStatSingleObjectJSONOutput(out) + fatalIfError(err, t) + + if fileMap[i].MinioStat.Key == "" { + t.Fatalf("Unable to stat Minio object (%s)", v.fileNameWithPrefix) + } + + } +} + +func ValidateFileMetaData(t *testing.T) { + for _, f := range fileMap { + validateFileLSInfo(t, f) + validateObjectMetaData(t, f) + // validateContentType(t, f) + } +} + +func FindObjects(t *testing.T) { + out, err := RunMC("find", mainTestBucket) + fatalIfError(err, t) + + findList, err := parseFindJSONOutput(out) + fatalIfError(err, t) + + for _, v := range fileMap { + + found := false + for _, vv := range findList { + if strings.HasSuffix(vv.Key, v.MinioLS.Key) { + found = true + } + } + + if !found { + t.Fatalf("File (%s) not found by 'find' command", v.MinioLS.Key) + } + } +} + +func FindObjectsUsingName(t *testing.T) { + for _, v := range fileMap { + + out, err := RunMC( + "find", + mainTestBucket, + "--name", + v.fileNameWithoutPath, + ) + + fatalIfError(err, t) + info, err := parseFindSingleObjectJSONOutput(out) + fatalIfError(err, t) + if !strings.HasSuffix(info.Key, v.MinioLS.Key) { + t.Fatalf("Invalid key (%s) when searching for (%s)", info.Key, v.MinioLS.Key) + } + + } +} + +func FindObjectsUsingNameAndFilteringForTxtType(t *testing.T) { + out, err := RunMC( + "find", + mainTestBucket, + "--name", + "*.txt", + ) + fatalIfError(err, t) + + findList, err := parseFindJSONOutput(out) + fatalIfError(err, t) + + for _, v := range fileMap { + if v.extension != ".txt" { + continue + } + + found := false + for _, vv := 
range findList { + if strings.HasSuffix(vv.Key, v.MinioLS.Key) { + found = true + } + } + + if !found { + t.Fatalf("File (%s) not found by 'find' command", v.MinioLS.Key) + } + } +} + +func FindObjectsSmallerThan64Mebibytes(t *testing.T) { + out, err := RunMC( + "find", + mainTestBucket, + "--smaller", + "64MB", + ) + fatalIfError(err, t) + + findList, err := parseFindJSONOutput(out) + fatalIfError(err, t) + + for _, v := range fileMap { + if v.diskStat.Size() > GetMBSizeInBytes(64) { + continue + } + + found := false + for _, vv := range findList { + if strings.HasSuffix(vv.Key, v.MinioLS.Key) { + found = true + } + } + + if !found { + t.Fatalf("File (%s) not found by 'find' command", v.MinioLS.Key) + } + } +} + +func FindObjectsLargerThan64Mebibytes(t *testing.T) { + out, err := RunMC( + "find", + mainTestBucket, + "--larger", + "64MB", + ) + fatalIfError(err, t) + + findList, err := parseFindJSONOutput(out) + fatalIfError(err, t) + + for _, v := range fileMap { + if v.diskStat.Size() < GetMBSizeInBytes(64) { + continue + } + + found := false + for _, vv := range findList { + if strings.HasSuffix(vv.Key, v.MinioLS.Key) { + found = true + } + } + + if !found { + t.Fatalf("File (%s) not found by 'find' command", v.MinioLS.Key) + } + } +} + +func FindObjectsOlderThan1d(t *testing.T) { + out, err := RunMC( + "find", + mainTestBucket, + "--older-than", + "1d", + ) + fatalIfError(err, t) + + findList, err := parseFindJSONOutput(out) + fatalIfError(err, t) + + if len(findList) > 0 { + t.Fatalf("We should not have found any files which are older then 1 day") + } +} + +func FindObjectsNewerThen1d(t *testing.T) { + out, err := RunMC( + "find", + mainTestBucket, + "--newer-than", + "1d", + ) + fatalIfError(err, t) + + findList, err := parseFindJSONOutput(out) + fatalIfError(err, t) + + for _, v := range fileMap { + + found := false + for _, vv := range findList { + if strings.HasSuffix(vv.Key, v.MinioLS.Key) { + found = true + } + } + + if !found { + t.Fatalf("File (%s) not found by 'find' command", v.MinioLS.Key) + } + } +} + +func GetObjectsAndCompareMD5(t *testing.T) { + for _, v := range fileMap { + + // make sure old downloads are not in our way + _ = os.Remove(tempDir + "/" + v.fileNameWithoutPath + ".downloaded") + + _, err := RunMC( + "cp", + mainTestBucket+"/"+v.fileNameWithPrefix, + tempDir+"/"+v.fileNameWithoutPath+".downloaded", + ) + fatalIfError(err, t) + + downloadedFile, err := os.Open( + tempDir + "/" + v.fileNameWithoutPath + ".downloaded", + ) + fatalIfError(err, t) + + fileBytes, err := io.ReadAll(downloadedFile) + fatalIfError(err, t) + md5sum := GetMD5Sum(fileBytes) + + if v.md5Sum != md5sum { + t.Fatalf( + "The downloaded file md5sum is wrong: original-md5(%s) downloaded-md5(%s)", + v.md5Sum, + md5sum, + ) + } + } +} + +func CreateBucketUsingInvalidSymbols(t *testing.T) { + bucketNameMap := make(map[string]string) + bucketNameMap["name-too-big"] = randomLargeString + bucketNameMap["!"] = "symbol!" 
+ bucketNameMap["@"] = "symbol@" + bucketNameMap["#"] = "symbol#" + bucketNameMap["$"] = "symbol$" + bucketNameMap["%"] = "symbol%" + bucketNameMap["^"] = "symbol^" + bucketNameMap["&"] = "symbol&" + bucketNameMap["*"] = "symbol*" + bucketNameMap["("] = "symbol(" + bucketNameMap[")"] = "symbol)" + bucketNameMap["{"] = "symbol{" + bucketNameMap["}"] = "symbol}" + bucketNameMap["["] = "symbol[" + bucketNameMap["]"] = "symbol]" + + for _, v := range bucketNameMap { + _, err := RunMC("mb", defaultAlias+"/"+v) + if err == nil { + t.Fatalf("We should not have been able to create a bucket with the name: %s", v) + } + } +} + +func RemoveBucketThatDoesNotExist(t *testing.T) { + randomID := uuid.NewString() + out, _ := RunMC( + "rb", + defaultAlias+"/"+randomID, + ) + errMSG, _ := parseSingleErrorMessageJSONOutput(out) + validateErrorMSGValues( + t, + errMSG, + "error", + "Unable to validate", + "does not exist", + ) +} + +func RemoveBucketWithNameTooLong(t *testing.T) { + randomID := uuid.NewString() + out, _ := RunMC( + "rb", + defaultAlias+"/"+randomID+randomID, + ) + errMSG, _ := parseSingleErrorMessageJSONOutput(out) + validateErrorMSGValues( + t, + errMSG, + "error", + "Unable to validate", + "Bucket name cannot be longer than 63 characters", + ) +} + +func UploadToUnknownBucket(t *testing.T) { + randomBucketID := uuid.NewString() + parameters := append( + []string{}, + "cp", + fileMap["1M"].diskFile.Name(), + defaultAlias+"/"+randomBucketID+"-test-should-not-exist"+"/"+fileMap["1M"].fileNameWithoutPath, + ) + + _, err := RunMC(parameters...) + if err == nil { + t.Fatalf("We should not have been able to upload to bucket: %s", randomBucketID) + } +} + +func preRunCleanup() { + for i := range tmpNameMap { + _, _ = RunMC("rb", "--force", "--dangerous", defaultAlias+"/test-"+i) + } +} + +func postRunCleanup(t *testing.T) { + var err error + var berr error + var out string + + err = os.RemoveAll(tempDir) + if err != nil { + fmt.Println(err) + } + + for _, v := range bucketList { + out, berr = RunMC("rb", "--force", "--dangerous", v) + if berr != nil { + fmt.Printf("Unable to remove bucket (%s) err: %s // out: %s", v, berr, out) + } + } + + for _, v := range userList { + _, _ = RunMC( + "admin", + "user", + "remove", + defaultAlias, + v.Username, + ) + } + + fatalIfError(berr, t) + fatalIfError(err, t) +} + +func validateFileLSInfo(t *testing.T, file *testFile) { + if file.diskStat.Size() != int64(file.MinioLS.Size) { + t.Fatalf( + "File and minio object are not the same size - Object (%d) vs File (%d)", + file.MinioLS.Size, + file.diskStat.Size(), + ) + } + // if file.md5Sum != file.findOutput.Etag { + // t.Fatalf("File and file.findOutput do not have the same md5Sum - Object (%s) vs File (%s)", file.findOutput.Etag, file.md5Sum) + // } + if file.storageClass != "" { + if file.storageClass != file.MinioLS.StorageClass { + t.Fatalf( + "File and minio object do not have the same storage class - Object (%s) vs File (%s)", + file.MinioLS.StorageClass, + file.storageClass, + ) + } + } else { + if file.MinioLS.StorageClass != "STANDARD" { + t.Fatalf( + "Minio object was expected to have storage class (STANDARD) but it was (%s)", + file.MinioLS.StorageClass, + ) + } + } +} + +func validateObjectMetaData(t *testing.T, file *testFile) { + for i, v := range file.metaData { + found := false + + for ii, vv := range file.MinioStat.Metadata { + if metaPrefix+strings.Title(i) == ii { + found = true + if v != vv { + fmt.Println("------------------------") + fmt.Println("META CHECK") + 
fmt.Println(file.MinioStat.Metadata) + fmt.Println(file.metaData) + fmt.Println("------------------------") + t.Fatalf("Meta values are not the same v1(%s) v2(%s)", v, vv) + } + } + } + + if !found { + fmt.Println("------------------------") + fmt.Println("META CHECK") + fmt.Println(file.MinioStat.Metadata) + fmt.Println(file.metaData) + fmt.Println("------------------------") + t.Fatalf("Meta tag(%s) not found", i) + } + + } +} + +// func validateContentType(t *testing.T, file *testFile) { +// value, ok := file.MinioStat.Metadata["Content-Type"] +// if !ok { +// t.Fatalf("File (%s) did not have a content type", file.fileNameWithPrefix) +// return +// } +// +// contentType := mime.TypeByExtension(file.extension) +// if contentType != value { +// log.Println(file) +// log.Println(file.MinioLS) +// log.Println(file.extension) +// log.Println(file.MinioStat) +// t.Fatalf("Content types on file (%s) do not match, extension(%s) File(%s) MinIO object(%s)", file.fileNameWithPrefix, file.extension, contentType, file.MinioStat.Metadata["Content-Type"]) +// } +// } + +func GetSource(skip int) (out string) { + pc := make([]uintptr, 3) // at least 1 entry needed + runtime.Callers(skip, pc) + f := runtime.FuncForPC(pc[0]) + file, line := f.FileLine(pc[0]) + sn := strings.Split(f.Name(), ".") + var name string + if sn[len(sn)-1] == "func1" { + name = sn[len(sn)-2] + } else { + name = sn[len(sn)-1] + } + out = file + ":" + fmt.Sprint(line) + ":" + name + return +} + +func GetMD5Sum(data []byte) string { + md5Writer := md5.New() + md5Writer.Write(data) + return fmt.Sprintf("%x", md5Writer.Sum(nil)) +} + +func curlFatalIfNoErrorTag(msg string, t *testing.T) { + if !strings.Contains(msg, "") { + fmt.Println(failIndicator) + fmt.Println(msg) + t.Fatal(msg) + } +} + +func fatalMsgOnly(msg string, t *testing.T) { + fmt.Println(failIndicator) + t.Fatal(msg) +} + +func fatalIfNoErrorWMsg(err error, msg string, t *testing.T) { + if err == nil { + fmt.Println(failIndicator) + fmt.Println(msg) + t.Fatal(err) + } +} + +func fatalIfErrorWMsg(err error, msg string, t *testing.T) { + if err != nil { + fmt.Println(failIndicator) + fmt.Println(msg) + t.Fatal(err) + } +} + +func fatalIfError(err error, t *testing.T) { + if err != nil { + fmt.Println(failIndicator) + t.Fatal(err) + } +} + +func parseFindJSONOutput(out string) (findList []*findMessage, err error) { + findList = make([]*findMessage, 0) + splitList := bytes.Split([]byte(out), []byte{10}) + + for _, v := range splitList { + if len(v) < 1 { + continue + } + line := new(findMessage) + err = json.Unmarshal(v, line) + if err != nil { + return + } + findList = append(findList, line) + } + + if printRawOut { + fmt.Println("FIND LIST ------------------------------") + for _, v := range findList { + fmt.Println(v) + } + fmt.Println(" ------------------------------") + } + return +} + +func parseDUJSONOutput(out string) (duList []duMessage, err error) { + duList = make([]duMessage, 0) + splitList := bytes.Split([]byte(out), []byte{10}) + + for _, v := range splitList { + if len(v) < 1 { + continue + } + line := duMessage{} + err = json.Unmarshal(v, &line) + if err != nil { + return + } + duList = append(duList, line) + } + + if printRawOut { + fmt.Println("DU LIST ------------------------------") + for _, v := range duList { + fmt.Println(v) + } + fmt.Println(" ------------------------------") + } + return +} + +func parseLSJSONOutput(out string) (lsList []contentMessage, err error) { + lsList = make([]contentMessage, 0) + splitList := bytes.Split([]byte(out), 
[]byte{10}) + + for _, v := range splitList { + if len(v) < 1 { + continue + } + line := contentMessage{} + err = json.Unmarshal(v, &line) + if err != nil { + return + } + lsList = append(lsList, line) + } + + if printRawOut { + fmt.Println("LS LIST ------------------------------") + for _, v := range lsList { + fmt.Println(v) + } + fmt.Println(" ------------------------------") + } + return +} + +func parseFindSingleObjectJSONOutput(out string) (findInfo contentMessage, err error) { + err = json.Unmarshal([]byte(out), &findInfo) + if err != nil { + return + } + + if printRawOut { + fmt.Println("FIND SINGLE OBJECT ------------------------------") + fmt.Println(findInfo) + fmt.Println(" ------------------------------") + } + return +} + +func parseStatSingleObjectJSONOutput(out string) (stat statMessage, err error) { + err = json.Unmarshal([]byte(out), &stat) + if err != nil { + return + } + + if printRawOut { + fmt.Println("STAT ------------------------------") + fmt.Println(stat) + fmt.Println(" ------------------------------") + } + return +} + +// We have to wrap the error output because the console +// printing mechanism for json marshals into an anonymous +// object before printing, see cmd/error.go line 70 +type errorMessageWrapper struct { + Error errorMessage `json:"error"` + Status string `json:"status"` +} + +func validateErrorMSGValues( + t *testing.T, + errMSG errorMessageWrapper, + TypeToValidate string, + MessageToValidate string, + CauseToValidate string, +) { + if TypeToValidate != "" { + if !strings.Contains(errMSG.Error.Type, TypeToValidate) { + t.Fatalf( + "Expected error.Error.Type to contain (%s) - but got (%s)", + TypeToValidate, + errMSG.Error.Type, + ) + } + } + if MessageToValidate != "" { + if !strings.Contains(errMSG.Error.Message, MessageToValidate) { + t.Fatalf( + "Expected error.Error.Message to contain (%s) - but got (%s)", + MessageToValidate, + errMSG.Error.Message, + ) + } + } + if CauseToValidate != "" { + if !strings.Contains(errMSG.Error.Cause.Message, CauseToValidate) { + t.Fatalf( + "Expected error.Error.Cause.Message to contain (%s) - but got (%s)", + CauseToValidate, + errMSG.Error.Cause.Message, + ) + } + } +} + +func parseUserMessageListOutput(out string) (users []*userMessage, err error) { + users = make([]*userMessage, 0) + splitList := bytes.Split([]byte(out), []byte{10}) + for _, v := range splitList { + if len(v) < 1 { + continue + } + msg := new(userMessage) + err = json.Unmarshal(v, msg) + if err != nil { + return + } + users = append(users, msg) + } + + if printRawOut { + fmt.Println("USER LIST ------------------------------") + for _, v := range users { + fmt.Println(v) + } + fmt.Println(" ------------------------------") + } + + return +} + +func parseShareMessageFromJSONOutput(out string) (share *shareMessage, err error) { + share = new(shareMessage) + err = json.Unmarshal([]byte(out), share) + return +} + +func parseSingleErrorMessageJSONOutput(out string) (errMSG errorMessageWrapper, err error) { + err = json.Unmarshal([]byte(out), &errMSG) + if err != nil { + return + } + + fmt.Println("ERROR ------------------------------") + fmt.Println(errMSG) + fmt.Println(" ------------------------------") + return +} + +func parseSingleODMessageJSONOutput(out string) (odMSG odMessage, err error) { + err = json.Unmarshal([]byte(out), &odMSG) + if err != nil { + return + } + + return +} + +func parseSingleAccountStatJSONOutput(out string) (stat accountStat, err error) { + err = json.Unmarshal([]byte(out), &stat) + if err != nil { + return + } + + 
return +} + +func parseSingleCPMessageJSONOutput(out string) (cpMSG copyMessage, err error) { + err = json.Unmarshal([]byte(out), &cpMSG) + if err != nil { + return + } + + return +} + +type newTestFile struct { + tag string // The tag used to identify the file inside the FileMap. This tag is also used in the objects name. + prefix string // Prefix for the object name ( not including the object name itself) + extension string + storageClass string + sizeInMBS int + // uploadShouldFail bool + metaData map[string]string + tags map[string]string + + addToGlobalFileMap bool + // sub directory path to place the file in + // tempDir+/+subDir + subDir string +} + +type testFile struct { + newTestFile + + // File on disk + diskFile *os.File + // File info on disk + diskStat os.FileInfo + // md5sum at the time of creation + md5Sum string + // File name without full path + fileNameWithoutPath string + // File name with assigned prefix + fileNameWithPrefix string + + // These field are not automatically populated unless + // the file is created at the initialization phase of + // the test suite: testsThatDependOnOneAnother() + // Minio mc stat output + MinioStat statMessage + // Minio mc ls output + MinioLS contentMessage +} + +func (f *testFile) String() (out string) { + out = fmt.Sprintf( + "Size: %d || Name: %s || md5Sum: %s", + f.diskStat.Size(), + f.fileNameWithoutPath, + f.md5Sum, + ) + return +} + +func createFile(nf newTestFile) (newTestFile *testFile) { + var newFile *os.File + var err error + if nf.subDir != "" { + + err = os.MkdirAll( + tempDir+string(os.PathSeparator)+nf.subDir, + 0o755) + if err != nil { + log.Println("Could not make additional dir:", err) + os.Exit(1) + } + + newFile, err = os.CreateTemp( + tempDir+string(os.PathSeparator)+nf.subDir, + nf.tag+"-*"+nf.extension, + ) + + } else { + newFile, err = os.CreateTemp(tempDir, nf.tag+"-*"+nf.extension) + } + + if err != nil { + log.Println("Could not make file:", err) + os.Exit(1) + } + + md5Writer := md5.New() + for i := 0; i < nf.sizeInMBS; i++ { + n, err := newFile.Write(oneMBSlice[:]) + mn, merr := md5Writer.Write(oneMBSlice[:]) + if err != nil || merr != nil { + log.Println(err) + log.Println(merr) + return nil + } + if n != len(oneMBSlice) { + log.Println("Did not write 1MB to file") + return nil + } + if mn != len(oneMBSlice) { + log.Println("Did not write 1MB to md5sum writer") + return nil + } + } + splitName := strings.Split(newFile.Name(), string(os.PathSeparator)) + fileNameWithoutPath := splitName[len(splitName)-1] + md5sum := fmt.Sprintf("%x", md5Writer.Sum(nil)) + stats, err := newFile.Stat() + if err != nil { + return nil + } + newTestFile = &testFile{ + md5Sum: md5sum, + fileNameWithoutPath: fileNameWithoutPath, + diskFile: newFile, + diskStat: stats, + } + + newTestFile.tag = nf.tag + newTestFile.metaData = nf.metaData + newTestFile.storageClass = nf.storageClass + newTestFile.sizeInMBS = nf.sizeInMBS + newTestFile.tags = nf.tags + newTestFile.prefix = nf.prefix + newTestFile.extension = nf.extension + + if nf.prefix != "" { + newTestFile.fileNameWithPrefix = nf.prefix + "/" + fileNameWithoutPath + } else { + newTestFile.fileNameWithPrefix = fileNameWithoutPath + } + if nf.addToGlobalFileMap { + fileMap[nf.tag] = newTestFile + } + return newTestFile +} + +func BuildCLI() error { + wd, _ := os.Getwd() + fmt.Println("WORKING DIR:", wd) + fmt.Println("go build -o", mcCmd, buildPath) + os.Remove(mcCmd) + out, err := exec.Command("go", "build", "-o", mcCmd, buildPath).CombinedOutput() + if err != nil { + 
log.Println("BUILD OUT:", out) + log.Println(err) + panic(err) + } + err = os.Chmod(mcCmd, 0o777) + if err != nil { + panic(err) + } + return nil +} + +func RunMC(parameters ...string) (out string, err error) { + var outBytes []byte + var outErr error + + fmt.Println("") + fmt.Println(time.Now().Format("2006-01-02T15:04:05.000"), "||", GetSource(3)) + fmt.Println(mcCmd, strings.Join(preCmdParameters, " "), strings.Join(parameters, " ")) + + outBytes, outErr = exec.Command(mcCmd, append(preCmdParameters, parameters...)...).CombinedOutput() + if printRawOut { + fmt.Println(string(outBytes)) + } + out = string(outBytes) + err = outErr + return +} + +func RunCommand(cmd string, parameters ...string) (out string, err error) { + fmt.Println("") + fmt.Println(time.Now().Format("2006-01-02T15:04:05.000"), "||", GetSource(3)) + fmt.Println(cmd, strings.Join(parameters, " ")) + var outBytes []byte + var outErr error + + outBytes, outErr = exec.Command(cmd, parameters...).CombinedOutput() + if printRawOut { + fmt.Println(string(outBytes)) + } + out = string(outBytes) + err = outErr + return +} diff --git a/cmd/typed-errors.go b/cmd/typed-errors.go index b2c7648752..281c6eebca 100644 --- a/cmd/typed-errors.go +++ b/cmd/typed-errors.go @@ -148,9 +148,46 @@ var errSourceIsDir = func(URL string) *probe.Error { return probe.NewError(sourceIsDirErr(errors.New(msg))).Untrace() } -type conflictSSEErr error +type sseInvalidAliasErr error -var errConflictSSE = func(sseServer, sseKeys string) *probe.Error { - err := fmt.Errorf("SSE alias '%s' overlaps with SSE-C aliases '%s'", sseServer, sseKeys) - return probe.NewError(conflictSSEErr(err)).Untrace() +var errSSEInvalidAlias = func(prefix string) *probe.Error { + msg := "SSE prefix " + prefix + " has an invalid alias." + return probe.NewError(sseInvalidAliasErr(errors.New(msg))).Untrace() +} + +type sseOverlappingAliasErr error + +var errSSEOverlappingAlias = func(prefix, overlappingPrefix string) *probe.Error { + msg := "SSE prefix " + prefix + " overlaps with " + overlappingPrefix + return probe.NewError(sseOverlappingAliasErr(errors.New(msg))).Untrace() +} + +type ssePrefixMatchErr error + +var errSSEPrefixMatch = func() *probe.Error { + msg := "SSE prefixes do not match any object paths." + return probe.NewError(ssePrefixMatchErr(errors.New(msg))).Untrace() +} + +type sseKeyMissingError error + +var errSSEKeyMissing = func() *probe.Error { + m := "SSE key is missing" + return probe.NewError(sseKeyMissingError(errors.New(m))).Untrace() +} + +type sseKMSKeyFormatErr error + +var errSSEKMSKeyFormat = func(msg string) *probe.Error { + m := "SSE key format error. " + m += msg + return probe.NewError(sseKMSKeyFormatErr(errors.New(m))).Untrace() +} + +type sseClientKeyFormatErr error + +var errSSEClientKeyFormat = func(msg string) *probe.Error { + m := "Encryption key should be 44 bytes raw base64 encoded key." 
+ m += msg + return probe.NewError(sseClientKeyFormatErr(errors.New(m))).Untrace() } diff --git a/cmd/utils.go b/cmd/utils.go index 471034596e..dc1df1d0a3 100644 --- a/cmd/utils.go +++ b/cmd/utils.go @@ -29,7 +29,6 @@ import ( "os" "path/filepath" "regexp" - "sort" "strconv" "strings" "time" @@ -37,7 +36,6 @@ import ( "github.com/mattn/go-ieproxy" "github.com/minio/madmin-go/v3" "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/encrypt" jwtgo "github.com/golang-jwt/jwt/v4" "github.com/minio/mc/pkg/probe" @@ -213,108 +211,6 @@ func getLookupType(l string) minio.BucketLookupType { return minio.BucketLookupAuto } -// struct representing object prefix and sse keys association. -type prefixSSEPair struct { - Prefix string - SSE encrypt.ServerSide -} - -// parse and validate encryption keys entered on command line -func parseAndValidateEncryptionKeys(sseKeys, sse string) (encMap map[string][]prefixSSEPair, err *probe.Error) { - encMap, err = parseEncryptionKeys(sseKeys) - if err != nil { - return nil, err - } - if sse != "" { - for _, prefix := range strings.Split(sse, ",") { - alias, _ := url2Alias(prefix) - encMap[alias] = append(encMap[alias], prefixSSEPair{ - Prefix: prefix, - SSE: encrypt.NewSSE(), - }) - } - } - for alias, ps := range encMap { - if hostCfg := mustGetHostConfig(alias); hostCfg == nil { - for _, p := range ps { - return nil, probe.NewError(errors.New("SSE prefix " + p.Prefix + " has invalid alias")) - } - } - } - return encMap, nil -} - -// parse list of comma separated alias/prefix=sse key values entered on command line and -// construct a map of alias to prefix and sse pairs. -func parseEncryptionKeys(sseKeys string) (encMap map[string][]prefixSSEPair, err *probe.Error) { - encMap = make(map[string][]prefixSSEPair) - if sseKeys == "" { - return - } - prefix := "" - index := 0 // start index of prefix - vs := 0 // start index of sse-c key - sseKeyLen := 32 - delim := 1 - k := len(sseKeys) - for index < k { - i := strings.Index(sseKeys[index:], "=") - if i == -1 { - return nil, probe.NewError(errors.New("SSE-C prefix should be of the form prefix1=key1,... ")) - } - prefix = sseKeys[index : index+i] - alias, _ := url2Alias(prefix) - vs = i + 1 + index - if vs+32 > k { - return nil, probe.NewError(errors.New("SSE-C key should be 32 bytes long")) - } - if (vs+sseKeyLen < k) && sseKeys[vs+sseKeyLen] != ',' { - return nil, probe.NewError(errors.New("SSE-C prefix=secret should be delimited by , and secret should be 32 bytes long")) - } - sseKey := sseKeys[vs : vs+sseKeyLen] - if _, ok := encMap[alias]; !ok { - encMap[alias] = make([]prefixSSEPair, 0) - } - sse, e := encrypt.NewSSEC([]byte(sseKey)) - if e != nil { - return nil, probe.NewError(e) - } - encMap[alias] = append(encMap[alias], prefixSSEPair{ - Prefix: prefix, - SSE: sse, - }) - // advance index sseKeyLen + delim bytes for the next key start - index = vs + sseKeyLen + delim - } - - // Sort encryption keys in descending order of prefix length - for _, encKeys := range encMap { - sort.Sort(byPrefixLength(encKeys)) - } - - // Success. - return encMap, nil -} - -// byPrefixLength implements sort.Interface. -type byPrefixLength []prefixSSEPair - -func (p byPrefixLength) Len() int { return len(p) } -func (p byPrefixLength) Less(i, j int) bool { - return len(p[i].Prefix) > len(p[j].Prefix) -} -func (p byPrefixLength) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// get SSE Key if object prefix matches with given resource. 
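The errSSEClientKeyFormat message introduced in cmd/typed-errors.go above expects a 44-byte value because an SSE-C key is 32 bytes of raw key material, and the standard base64 encoding of 32 bytes is always 44 characters. A minimal, self-contained sketch (not part of this patch) that produces a key in that form:

```go
package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

func main() {
	// SSE-C uses 32 bytes of raw key material.
	raw := make([]byte, 32)
	if _, err := rand.Read(raw); err != nil {
		panic(err)
	}
	// Standard base64 of 32 bytes is always 44 characters (with padding),
	// which is the length the errSSEClientKeyFormat message refers to.
	key := base64.StdEncoding.EncodeToString(raw)
	fmt.Println(key, len(key)) // prints the key and 44
}
```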
-func getSSE(resource string, encKeys []prefixSSEPair) encrypt.ServerSide { - for _, k := range encKeys { - if strings.HasPrefix(resource, k.Prefix) { - return k.SSE - } - } - return nil -} - // Return true if target url is a part of a source url such as: // alias/bucket/ and alias/bucket/dir/, however func isURLContains(srcURL, tgtURL, sep string) bool { diff --git a/cmd/utils_test.go b/cmd/utils_test.go index d190cf0f55..699c8503a7 100644 --- a/cmd/utils_test.go +++ b/cmd/utils_test.go @@ -20,87 +20,8 @@ package cmd import ( "reflect" "testing" - - "github.com/minio/minio-go/v7/pkg/encrypt" ) -func TestParseEncryptionKeys(t *testing.T) { - sseKey1, err := encrypt.NewSSEC([]byte("32byteslongsecretkeymustbegiven2")) - if err != nil { - t.Fatal(err) - } - sseKey2, err := encrypt.NewSSEC([]byte("32byteslongsecretkeymustbegiven1")) - if err != nil { - t.Fatal(err) - } - sseSpaceKey1, err := encrypt.NewSSEC([]byte("32byteslongsecret mustbegiven1")) - if err != nil { - t.Fatal(err) - } - sseCommaKey1, err := encrypt.NewSSEC([]byte("32byteslongsecretkey,ustbegiven1")) - if err != nil { - t.Fatal(err) - } - testCases := []struct { - encryptionKey string - expectedEncMap map[string][]prefixSSEPair - success bool - }{ - { - encryptionKey: "myminio1/test2=32byteslongsecretkeymustbegiven2", - expectedEncMap: map[string][]prefixSSEPair{"myminio1": {{ - Prefix: "myminio1/test2", - SSE: sseKey1, - }}}, - success: true, - }, - { - encryptionKey: "myminio1/test2=32byteslongsecretkeymustbegiven", - expectedEncMap: nil, - success: false, - }, - { - encryptionKey: "myminio1/test2=32byteslongsecretkey,ustbegiven1", - expectedEncMap: map[string][]prefixSSEPair{"myminio1": {{ - Prefix: "myminio1/test2", - SSE: sseCommaKey1, - }}}, - success: true, - }, - { - encryptionKey: "myminio1/test2=32byteslongsecret mustbegiven1", - expectedEncMap: map[string][]prefixSSEPair{"myminio1": {{ - Prefix: "myminio1/test2", - SSE: sseSpaceKey1, - }}}, - success: true, - }, - { - encryptionKey: "myminio1/test2=32byteslongsecretkeymustbegiven2,myminio1/test1/a=32byteslongsecretkeymustbegiven1", - expectedEncMap: map[string][]prefixSSEPair{"myminio1": {{ - Prefix: "myminio1/test1/a", - SSE: sseKey2, - }, { - Prefix: "myminio1/test2", - SSE: sseKey1, - }}}, - success: true, - }, - } - for i, testCase := range testCases { - encMap, err := parseEncryptionKeys(testCase.encryptionKey) - if err != nil && testCase.success { - t.Fatalf("Test %d: Expected success, got %s", i+1, err) - } - if err == nil && !testCase.success { - t.Fatalf("Test %d: Expected error, got success", i+1) - } - if testCase.success && !reflect.DeepEqual(encMap, testCase.expectedEncMap) { - t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.expectedEncMap, encMap) - } - } -} - func TestParseAttribute(t *testing.T) { metaDataCases := []struct { input string diff --git a/docs/LICENSE b/docs/LICENSE deleted file mode 100644 index 2f244ac814..0000000000 --- a/docs/LICENSE +++ /dev/null @@ -1,395 +0,0 @@ -Attribution 4.0 International - -======================================================================= - -Creative Commons Corporation ("Creative Commons") is not a law firm and -does not provide legal services or legal advice. Distribution of -Creative Commons public licenses does not create a lawyer-client or -other relationship. Creative Commons makes its licenses and related -information available on an "as-is" basis. 
Creative Commons gives no -warranties regarding its licenses, any material licensed under their -terms and conditions, or any related information. Creative Commons -disclaims all liability for damages resulting from their use to the -fullest extent possible. - -Using Creative Commons Public Licenses - -Creative Commons public licenses provide a standard set of terms and -conditions that creators and other rights holders may use to share -original works of authorship and other material subject to copyright -and certain other rights specified in the public license below. The -following considerations are for informational purposes only, are not -exhaustive, and do not form part of our licenses. - - Considerations for licensors: Our public licenses are - intended for use by those authorized to give the public - permission to use material in ways otherwise restricted by - copyright and certain other rights. Our licenses are - irrevocable. Licensors should read and understand the terms - and conditions of the license they choose before applying it. - Licensors should also secure all rights necessary before - applying our licenses so that the public can reuse the - material as expected. Licensors should clearly mark any - material not subject to the license. This includes other CC- - licensed material, or material used under an exception or - limitation to copyright. More considerations for licensors: - wiki.creativecommons.org/Considerations_for_licensors - - Considerations for the public: By using one of our public - licenses, a licensor grants the public permission to use the - licensed material under specified terms and conditions. If - the licensor's permission is not necessary for any reason--for - example, because of any applicable exception or limitation to - copyright--then that use is not regulated by the license. Our - licenses grant only permissions under copyright and certain - other rights that a licensor has authority to grant. Use of - the licensed material may still be restricted for other - reasons, including because others have copyright or other - rights in the material. A licensor may make special requests, - such as asking that all changes be marked or described. - Although not required by our licenses, you are encouraged to - respect those requests where reasonable. More_considerations - for the public: - wiki.creativecommons.org/Considerations_for_licensees - -======================================================================= - -Creative Commons Attribution 4.0 International Public License - -By exercising the Licensed Rights (defined below), You accept and agree -to be bound by the terms and conditions of this Creative Commons -Attribution 4.0 International Public License ("Public License"). To the -extent this Public License may be interpreted as a contract, You are -granted the Licensed Rights in consideration of Your acceptance of -these terms and conditions, and the Licensor grants You such rights in -consideration of benefits the Licensor receives from making the -Licensed Material available under these terms and conditions. - - -Section 1 -- Definitions. - - a. Adapted Material means material subject to Copyright and Similar - Rights that is derived from or based upon the Licensed Material - and in which the Licensed Material is translated, altered, - arranged, transformed, or otherwise modified in a manner requiring - permission under the Copyright and Similar Rights held by the - Licensor. 
For purposes of this Public License, where the Licensed - Material is a musical work, performance, or sound recording, - Adapted Material is always produced where the Licensed Material is - synched in timed relation with a moving image. - - b. Adapter's License means the license You apply to Your Copyright - and Similar Rights in Your contributions to Adapted Material in - accordance with the terms and conditions of this Public License. - - c. Copyright and Similar Rights means copyright and/or similar rights - closely related to copyright including, without limitation, - performance, broadcast, sound recording, and Sui Generis Database - Rights, without regard to how the rights are labeled or - categorized. For purposes of this Public License, the rights - specified in Section 2(b)(1)-(2) are not Copyright and Similar - Rights. - - d. Effective Technological Measures means those measures that, in the - absence of proper authority, may not be circumvented under laws - fulfilling obligations under Article 11 of the WIPO Copyright - Treaty adopted on December 20, 1996, and/or similar international - agreements. - - e. Exceptions and Limitations means fair use, fair dealing, and/or - any other exception or limitation to Copyright and Similar Rights - that applies to Your use of the Licensed Material. - - f. Licensed Material means the artistic or literary work, database, - or other material to which the Licensor applied this Public - License. - - g. Licensed Rights means the rights granted to You subject to the - terms and conditions of this Public License, which are limited to - all Copyright and Similar Rights that apply to Your use of the - Licensed Material and that the Licensor has authority to license. - - h. Licensor means the individual(s) or entity(ies) granting rights - under this Public License. - - i. Share means to provide material to the public by any means or - process that requires permission under the Licensed Rights, such - as reproduction, public display, public performance, distribution, - dissemination, communication, or importation, and to make material - available to the public including in ways that members of the - public may access the material from a place and at a time - individually chosen by them. - - j. Sui Generis Database Rights means rights other than copyright - resulting from Directive 96/9/EC of the European Parliament and of - the Council of 11 March 1996 on the legal protection of databases, - as amended and/or succeeded, as well as other essentially - equivalent rights anywhere in the world. - - k. You means the individual or entity exercising the Licensed Rights - under this Public License. Your has a corresponding meaning. - - -Section 2 -- Scope. - - a. License grant. - - 1. Subject to the terms and conditions of this Public License, - the Licensor hereby grants You a worldwide, royalty-free, - non-sublicensable, non-exclusive, irrevocable license to - exercise the Licensed Rights in the Licensed Material to: - - a. reproduce and Share the Licensed Material, in whole or - in part; and - - b. produce, reproduce, and Share Adapted Material. - - 2. Exceptions and Limitations. For the avoidance of doubt, where - Exceptions and Limitations apply to Your use, this Public - License does not apply, and You do not need to comply with - its terms and conditions. - - 3. Term. The term of this Public License is specified in Section - 6(a). - - 4. Media and formats; technical modifications allowed. 
The - Licensor authorizes You to exercise the Licensed Rights in - all media and formats whether now known or hereafter created, - and to make technical modifications necessary to do so. The - Licensor waives and/or agrees not to assert any right or - authority to forbid You from making technical modifications - necessary to exercise the Licensed Rights, including - technical modifications necessary to circumvent Effective - Technological Measures. For purposes of this Public License, - simply making modifications authorized by this Section 2(a) - (4) never produces Adapted Material. - - 5. Downstream recipients. - - a. Offer from the Licensor -- Licensed Material. Every - recipient of the Licensed Material automatically - receives an offer from the Licensor to exercise the - Licensed Rights under the terms and conditions of this - Public License. - - b. No downstream restrictions. You may not offer or impose - any additional or different terms or conditions on, or - apply any Effective Technological Measures to, the - Licensed Material if doing so restricts exercise of the - Licensed Rights by any recipient of the Licensed - Material. - - 6. No endorsement. Nothing in this Public License constitutes or - may be construed as permission to assert or imply that You - are, or that Your use of the Licensed Material is, connected - with, or sponsored, endorsed, or granted official status by, - the Licensor or others designated to receive attribution as - provided in Section 3(a)(1)(A)(i). - - b. Other rights. - - 1. Moral rights, such as the right of integrity, are not - licensed under this Public License, nor are publicity, - privacy, and/or other similar personality rights; however, to - the extent possible, the Licensor waives and/or agrees not to - assert any such rights held by the Licensor to the limited - extent necessary to allow You to exercise the Licensed - Rights, but not otherwise. - - 2. Patent and trademark rights are not licensed under this - Public License. - - 3. To the extent possible, the Licensor waives any right to - collect royalties from You for the exercise of the Licensed - Rights, whether directly or through a collecting society - under any voluntary or waivable statutory or compulsory - licensing scheme. In all other cases the Licensor expressly - reserves any right to collect such royalties. - - -Section 3 -- License Conditions. - -Your exercise of the Licensed Rights is expressly made subject to the -following conditions. - - a. Attribution. - - 1. If You Share the Licensed Material (including in modified - form), You must: - - a. retain the following if it is supplied by the Licensor - with the Licensed Material: - - i. identification of the creator(s) of the Licensed - Material and any others designated to receive - attribution, in any reasonable manner requested by - the Licensor (including by pseudonym if - designated); - - ii. a copyright notice; - - iii. a notice that refers to this Public License; - - iv. a notice that refers to the disclaimer of - warranties; - - v. a URI or hyperlink to the Licensed Material to the - extent reasonably practicable; - - b. indicate if You modified the Licensed Material and - retain an indication of any previous modifications; and - - c. indicate the Licensed Material is licensed under this - Public License, and include the text of, or the URI or - hyperlink to, this Public License. - - 2. 
You may satisfy the conditions in Section 3(a)(1) in any - reasonable manner based on the medium, means, and context in - which You Share the Licensed Material. For example, it may be - reasonable to satisfy the conditions by providing a URI or - hyperlink to a resource that includes the required - information. - - 3. If requested by the Licensor, You must remove any of the - information required by Section 3(a)(1)(A) to the extent - reasonably practicable. - - 4. If You Share Adapted Material You produce, the Adapter's - License You apply must not prevent recipients of the Adapted - Material from complying with this Public License. - - -Section 4 -- Sui Generis Database Rights. - -Where the Licensed Rights include Sui Generis Database Rights that -apply to Your use of the Licensed Material: - - a. for the avoidance of doubt, Section 2(a)(1) grants You the right - to extract, reuse, reproduce, and Share all or a substantial - portion of the contents of the database; - - b. if You include all or a substantial portion of the database - contents in a database in which You have Sui Generis Database - Rights, then the database in which You have Sui Generis Database - Rights (but not its individual contents) is Adapted Material; and - - c. You must comply with the conditions in Section 3(a) if You Share - all or a substantial portion of the contents of the database. - -For the avoidance of doubt, this Section 4 supplements and does not -replace Your obligations under this Public License where the Licensed -Rights include other Copyright and Similar Rights. - - -Section 5 -- Disclaimer of Warranties and Limitation of Liability. - - a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE - EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS - AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF - ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, - IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, - WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR - PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, - ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT - KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT - ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. - - b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE - TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, - NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, - INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, - COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR - USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN - ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR - DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR - IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. - - c. The disclaimer of warranties and limitation of liability provided - above shall be interpreted in a manner that, to the extent - possible, most closely approximates an absolute disclaimer and - waiver of all liability. - - -Section 6 -- Term and Termination. - - a. This Public License applies for the term of the Copyright and - Similar Rights licensed here. However, if You fail to comply with - this Public License, then Your rights under this Public License - terminate automatically. - - b. Where Your right to use the Licensed Material has terminated under - Section 6(a), it reinstates: - - 1. 
automatically as of the date the violation is cured, provided - it is cured within 30 days of Your discovery of the - violation; or - - 2. upon express reinstatement by the Licensor. - - For the avoidance of doubt, this Section 6(b) does not affect any - right the Licensor may have to seek remedies for Your violations - of this Public License. - - c. For the avoidance of doubt, the Licensor may also offer the - Licensed Material under separate terms or conditions or stop - distributing the Licensed Material at any time; however, doing so - will not terminate this Public License. - - d. Sections 1, 5, 6, 7, and 8 survive termination of this Public - License. - - -Section 7 -- Other Terms and Conditions. - - a. The Licensor shall not be bound by any additional or different - terms or conditions communicated by You unless expressly agreed. - - b. Any arrangements, understandings, or agreements regarding the - Licensed Material not stated herein are separate from and - independent of the terms and conditions of this Public License. - - -Section 8 -- Interpretation. - - a. For the avoidance of doubt, this Public License does not, and - shall not be interpreted to, reduce, limit, restrict, or impose - conditions on any use of the Licensed Material that could lawfully - be made without permission under this Public License. - - b. To the extent possible, if any provision of this Public License is - deemed unenforceable, it shall be automatically reformed to the - minimum extent necessary to make it enforceable. If the provision - cannot be reformed, it shall be severed from this Public License - without affecting the enforceability of the remaining terms and - conditions. - - c. No term or condition of this Public License will be waived and no - failure to comply consented to unless expressly agreed to by the - Licensor. - - d. Nothing in this Public License constitutes or may be interpreted - as a limitation upon, or waiver of, any privileges and immunities - that apply to the Licensor or You, including from the legal - processes of any jurisdiction or authority. - - -======================================================================= - -Creative Commons is not a party to its public -licenses. Notwithstanding, Creative Commons may elect to apply one of -its public licenses to material it publishes and in those instances -will be considered the “Licensor.” The text of the Creative Commons -public licenses is dedicated to the public domain under the CC0 Public -Domain Dedication. Except for the limited purpose of indicating that -material is shared under a Creative Commons public license or as -otherwise permitted by the Creative Commons policies published at -creativecommons.org/policies, Creative Commons does not authorize the -use of the trademark "Creative Commons" or any other trademark or logo -of Creative Commons without its prior written consent including, -without limitation, in connection with any unauthorized modifications -to any of its public licenses or any other arrangements, -understandings, or agreements concerning use of licensed material. For -the avoidance of doubt, this paragraph does not form part of the -public licenses. - -Creative Commons may be contacted at creativecommons.org. 
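For reviewers of the functional suite added in cmd/suite_test.go above, a hedged sketch of how its helpers (createFile, RunMC, parseStatSingleObjectJSONOutput) might compose into a test case. The test name, alias, and bucket below are illustrative placeholders and do not appear in this patch:

```go
// Illustrative sketch only; assumes the helpers defined in cmd/suite_test.go
// and an alias already configured by the suite. "testalias" and the bucket
// name are placeholders.
func Test_AssumedCopyAndStat(t *testing.T) {
	// Create a 1 MiB temp file and register it in fileMap under its tag.
	f := createFile(newTestFile{
		tag:                "cp-stat-check",
		sizeInMBS:          1,
		addToGlobalFileMap: true,
	})
	if f == nil {
		t.Fatal("could not create test file")
	}

	bucket := "testalias/deleteme-cp-stat-check"
	if _, err := RunMC("mb", bucket); err != nil {
		t.Fatal(err)
	}

	// Upload the file, then stat it back and reuse the JSON parser above.
	object := bucket + "/" + f.fileNameWithoutPath
	if _, err := RunMC("cp", f.diskFile.Name(), object); err != nil {
		t.Fatal(err)
	}
	out, err := RunMC("stat", "--json", object)
	if err != nil {
		t.Fatal(err)
	}
	if _, err := parseStatSingleObjectJSONOutput(out); err != nil {
		t.Fatal(err)
	}
}
```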
diff --git a/docs/MAINTAINERS.md b/docs/MAINTAINERS.md deleted file mode 100644 index 8f14b13915..0000000000 --- a/docs/MAINTAINERS.md +++ /dev/null @@ -1,44 +0,0 @@ -# For maintainers only - -## Responsibilities - -Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522) - -### Setup your mc Github Repository - -Fork [mc upstream](https://github.com/minio/mc/fork) source repository to your own personal repository. - -``` - -$ mkdir -p $GOPATH/src/github.com/minio -$ cd $GOPATH/src/github.com/minio -$ git clone https://github.com/$USER_ID/mc -$ - -``` - -``mc`` uses [govendor](https://github.com/kardianos/govendor) for its dependency management. - -### To manage dependencies - -#### Add new dependencies - - - Run `go get foo/bar` - - Edit your code to import foo/bar - - Run `govendor add foo/bar` from top-level folder - -#### Remove dependencies - - - Run `govendor remove foo/bar` - -#### Update dependencies - - - Run `govendor remove +vendor` - - Run to update the dependent package `go get -u foo/bar` - - Run `govendor add +external` - -### Making new releases - -`mc` doesn't follow semantic versioning style, `mc` instead uses the release date and time as the release versions. - -`make release` will generate new binary into `release` directory. diff --git a/docs/minio-admin-complete-guide.md b/docs/minio-admin-complete-guide.md deleted file mode 100644 index e937361318..0000000000 --- a/docs/minio-admin-complete-guide.md +++ /dev/null @@ -1,1175 +0,0 @@ -# MinIO Admin Complete Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) - -MinIO Client (mc) provides `admin` sub-command to perform administrative tasks on your MinIO deployments. - -``` -service restart and stop all MinIO servers -update update all MinIO servers -info display MinIO server information -user manage users -group manage groups -policy manage policies defined in the MinIO server -replicate manage MinIO site replication -config manage MinIO server configuration -decommission, decom manage MinIO server pool decommissioning -heal heal bucket(s) and object(s) on MinIO server -prometheus manages prometheus config -kms perform KMS management operations -bucket manage buckets defined in the MinIO server -scanner provide MinIO scanner info -top provide top like statistics for MinIO -trace show http trace for MinIO server -cluster manage MinIO cluster metadata -rebalance Manage MinIO rebalance -logs show MinIO logs -``` - -## 1. Download MinIO Client -### Docker Stable -```sh -docker pull minio/mc -docker run minio/mc admin info play -``` - -### Docker Edge -```sh -docker pull minio/mc:edge -docker run minio/mc:edge admin info server play -``` - -### Homebrew (macOS) -Install mc packages using [Homebrew](http://brew.sh/) - -```sh -brew install minio/stable/mc -mc --help -``` - -### Binary Download (GNU/Linux) -| Platform | Architecture | URL | -| ---------- | -------- |------| -|GNU/Linux|64-bit Intel|https://dl.min.io/client/mc/release/linux-amd64/mc | -||64-bit PPC|https://dl.min.io/client/mc/release/linux-ppc64le/mc | - -```sh -chmod +x mc -./mc --help -``` - -### Binary Download (Microsoft Windows) -| Platform | Architecture | URL | -| ---------- | -------- |------| -|Microsoft Windows|64-bit Intel|https://dl.min.io/client/mc/release/windows-amd64/mc.exe | - -``` -mc.exe --help -``` - -### Install from Source -Source installation is intended only for developers and advanced users. 
`mc update` command does not support update notifications for source based installations. Please download official releases from https://min.io/download/#minio-client. - -If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). - -```sh -go get -d github.com/minio/mc -cd ${GOPATH}/src/github.com/minio/mc -make -``` - -## 2. Run MinIO Client - -### GNU/Linux - -```sh -chmod +x mc -./mc --help -``` - -### macOS - -```sh -chmod 755 mc -./mc --help -``` - -### Microsoft Windows - -``` -mc.exe --help -``` - -## 3. Add a MinIO Storage Service -MinIO server displays URL, access and secret keys. - -#### Usage - -```sh -mc alias set [YOUR-ACCESS-KEY] [YOUR-SECRET-KEY] -``` - -Keys must be supplied by argument or standard input. - - is simply a short name to your MinIO service. MinIO end-point, access and secret keys are supplied by your MinIO service. Admin API uses "S3v4" signature and cannot be changed. - -### Examples - -1. Keys by argument - - ```sh - mc alias set minio http://192.168.1.51:9000 BKIKJAA5BMMU2RHO6IBB V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12 - ``` - -2. Keys by prompt - - ```sh - mc alias set minio http://192.168.1.51:9000 - Enter Access Key: BKIKJAA5BMMU2RHO6IBB - Enter Secret Key: V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12 - ``` - -2. Keys by pipe - - ```sh - echo -e "BKIKJAA5BMMU2RHO6IBB\nV7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12" | \ - mc alias set minio http://192.168.1.51:9000 - ``` - -## 4. Test Your Setup - -*Example:* - -Get MinIO server information for the configured alias `minio` - -```sh -mc admin info minio -● min.minio.io - Uptime: 11 hours - Version: 2020-01-17T22:08:02Z - Network: 1/1 OK - Drives: 4/4 OK - -2.1 GiB Used, 158 Buckets, 12,092 Objects -4 drives online, 0 drives offline -``` - -## 5. Everyday Use -You may add shell aliases for info, healing. - -```sh -alias minfo='mc admin info' -``` - -## 6. Global Options - -### Option [--debug] -Debug option enables debug output to console. - -*Example: Display verbose debug output for `info` command.* - -```sh -mc --debug ls play -mc: GET /minio/admin/v2/info HTTP/1.1 -Host: play.minio.io -User-Agent: MinIO (linux; amd64) madmin-go/0.0.1 mc/DEVELOPMENT.GOGET -Authorization: AWS4-HMAC-SHA256 Credential=**REDACTED**/20200120//s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=**REDACTED** -X-Amz-Content-Sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 -X-Amz-Date: 20200120T185844Z -Accept-Encoding: gzip - -mc: HTTP/1.1 200 OK -Content-Length: 1105 -Accept-Ranges: bytes -Connection: keep-alive -Content-Security-Policy: block-all-mixed-content -Content-Type: application/json -Date: Mon, 20 Jan 2020 18:58:44 GMT -Server: nginx/1.10.3 (Ubuntu) -Vary: Origin -X-Amz-Bucket-Region: us-east-1 -X-Amz-Request-Id: 15EBAD6087210B2A -X-Xss-Protection: 1; mode=block - -mc: Response Time: 381.860854ms - -● play.minio.io - Uptime: 11 hours - Version: 2020-01-17T22:08:02Z - Network: 1/1 OK - Drives: 4/4 OK - -2.1 GiB Used, 158 Buckets, 12,092 Objects -4 drives online, 0 drives offline -``` - -### Option [--json] -JSON option enables parseable output in [JSON lines](http://jsonlines.org/) format. 
- -*Example: MinIO server information.* - -```sh -mc admin --json info play -{ - "status": "success", - "info": { - "mode": "online", - "region": "us-east-1", - "deploymentID": "728e91fd-ed0c-4500-b13d-d143561518bf", - "buckets": { - "count": 158 - }, - "objects": { - "count": 12092 - }, - "usage": { - "size": 2249526349 - }, - "services": { - "vault": { - "status": "KMS configured using master key" - }, - "ldap": {} - }, - "backend": { - "backendType": "Erasure", - "onlineDisks": 4, - "rrSCParity": 2, - "standardSCParity": 2 - }, - "servers": [ - { - "state": "ok", - "endpoint": "play.minio.io", - "uptime": 41216, - "version": "2020-01-17T22:08:02Z", - "commitID": "b0b25d558e25608e3a604888a0a43e58e8301dfb", - "network": { - "play.minio.io": "online" - }, - "disks": [ - { - "path": "/home/play/data1", - "state": "ok", - "uuid": "c1f8dbf8-39c8-46cd-bab6-2c87d18db06a", - "totalspace": 8378122240, - "usedspace": 1410588672 - }, - { - "path": "/home/play/data2", - "state": "ok", - "uuid": "9616d28f-5f4d-47f4-9c6d-4deb0da07cad", - "totalspace": 8378122240, - "usedspace": 1410588672 - }, - { - "path": "/home/play/data3", - "state": "ok", - "uuid": "4c822d68-4d9a-4fa3-aabb-5bf5a58e5848", - "totalspace": 8378122240, - "usedspace": 1410588672 - }, - { - "path": "/home/play/data4", - "state": "ok", - "uuid": "95b5a33c-193b-4a11-b13a-a99bc1483182", - "totalspace": 8378122240, - "usedspace": 1410588672 - } - ] - } - ] - } -} -``` - -### Option [--no-color] -This option disables the color theme. It is useful for dumb terminals. - -### Option [--quiet] -Quiet option suppress chatty console output. - -### Option [--config-dir] -Use this option to set a custom config path. - -### Option [ --insecure] -Skip SSL certificate verification. - -### Option [--version] -Display the current version of `mc` installed - -### Option [--limit-upload] -limits uploads to a maximum rate in KiB/s, MiB/s, GiB/s. (default: unlimited) - -### Option [--limit-download] -limits downloads to a maximum rate in KiB/s, MiB/s, GiB/s. (default: unlimited) - -*Example: Print version of mc.* - -```sh -mc --version -mc version RELEASE.2020-04-25T00-43-23Z -``` - -## 7. Commands - -| Commands | -|:-----------------------------------------------------------------------------------| -| [**service** - restart and stop all MinIO servers](#service) | -| [**update** - updates all MinIO servers](#update) | -| [**info** - display MinIO server information](#info) | -| [**user** - manage users](#user) | -| [**group** - manage groups](#group) | -| [**policy** - manage canned policies](#policy) | -| [**replicate** - manage MinIO site replication](#replicate) | -| [**config** - manage server configuration file](#config) | -| [**decommission, decom** - manage MinIO server pool decommissioning](#decommission) | -| [**heal** - heal bucket(s) and object(s) on MinIO server](#heal) | -| [**prometheus** - manages prometheus config settings](#prometheus) | -| [**kms** - perform KMS management operations](#kms) | -| [**bucket** - manages buckets defined in the MinIO server](#bucket) | -| [**scanner** - provide MinIO scanner info](#scanner) | -| [**top** - provide top like statistics for MinIO](#top) | -| [**trace** - show http trace for MinIO server](#trace) | -| [**logs** - show MinIO logs](#logs) | -| [**cluster** - manage MinIO cluster metadata](#cluster) | -| [**rebalance** - Manage MinIO rebalance](#rebalance) | - - -### Command `update` - updates all MinIO servers -`update` command provides a way to update all MinIO servers in a cluster. 
You can also use a private mirror server with `update` command to update your MinIO cluster. This is useful in cases where MinIO is running in an environment that doesn't have Internet access. - -*Example: Update all MinIO servers.* -```sh -mc admin update play -Server `play` updated successfully from RELEASE.2019-08-14T20-49-49Z to RELEASE.2019-08-21T19-59-10Z -``` - -#### Steps to update MinIO using a private mirror -For using `update` command with private mirror server, you need to mirror the directory structure on `https://dl.minio.io/server/minio/release/linux-amd64/` on your private mirror server and then provide: - -```sh -mc admin update myminio https://myfavorite-mirror.com/minio-server/linux-amd64/minio.sha256sum -Server `myminio` updated successfully from RELEASE.2019-08-14T20-49-49Z to RELEASE.2019-08-21T19-59-10Z -``` - -> NOTE: -> - An alias pointing to a distributed setup this command will automatically update all MinIO servers in the cluster. -> - `update` is a disruptive operation for your MinIO service, any on-going API operations will be forcibly canceled. So, it should be used only when you are planning MinIO upgrades for your deployment. -> - It is recommended to perform a restart after `update` successfully completes. - - -### Command `service` - restart and stop all MinIO servers -`service` command provides a way to restart and stop all MinIO servers. - -> NOTE: -> - An alias pointing to a distributed setup this command will automatically execute the same actions across all servers. -> - `restart` and `stop` sub-commands are disruptive operations for your MinIO service, any on-going API operations will be forcibly canceled. So, it should be used only under administrative circumstances. Please use it with caution. - -```sh -NAME: - mc admin service - restart and stop all MinIO servers - -FLAGS: - --help, -h show help - -COMMANDS: - restart restart a MinIO cluster - stop stop a MinIO cluster - unfreeze unfreeze S3 API calls on MinIO cluster -``` - -*Example: Restart all MinIO servers.* -```sh -mc admin service restart play -Restarted `play` successfully. -``` - - -### Command `info` - Display MinIO server information -`info` command displays server information of one or many MinIO servers (under distributed cluster) - -```sh -NAME: - mc admin info - display MinIO server information - -FLAGS: - --help, -h show help -``` - -*Example: Display MinIO server information.* - -```sh -mc admin info play -● play.minio.io - Uptime: 11 hours - Version: 2020-01-17T22:08:02Z - Network: 1/1 OK - Drives: 4/4 OK - -2.1 GiB Used, 158 Buckets, 12,092 Objects -4 drives online, 0 drives offline -``` - - -### Command `policy` - Manage canned policies -`policy` command to add, remove, list policies, get info on a policy and to set a policy for a user on MinIO server. 
- -```sh -NAME: - mc admin policy - manage policies defined in the MinIO server - -FLAGS: - --help, -h show help - -COMMANDS: - create create a new IAM policy - remove remove an IAM policy - list list all IAM policies - info show info on an IAM policy - attach attach an IAM policy to a user or group - detach detach an IAM policy from a user or group - entities list policy association entities -``` - -*Example: List all canned policies on MinIO.* -```sh -mc admin policy list myminio/ -diagnostics -readonly -readwrite -writeonly -``` - - -*Example: Add a new policy 'listbucketsonly' on MinIO, with policy from /tmp/listbucketsonly.json.* -*When this policy is applied on a user, that user can only list the top layer buckets, but nothing else, no prefixes, no objects.* - -*First create the json file, /tmp/listbucketsonly.json, with the following information.* -``` -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:ListAllMyBuckets" - ], - "Resource": [ - "arn:aws:s3:::*" - ] - } - ] -} -``` - -*Add the policy as 'listbucketsonly' to the policy database* -```sh -mc admin policy create myminio/ listbucketsonly /tmp/listbucketsonly.json -Added policy `listbucketsonly` successfully. -``` - -*Example: Remove policy 'listbucketsonly' on MinIO.* - -```sh -mc admin policy remove myminio/ listbucketsonly -Removed policy `listbucketsonly` successfully. -``` - -*Example: Show info on a canned policy, 'writeonly'* - -```sh -mc admin policy info myminio/ writeonly -{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":["s3:PutObject"],"Resource":["arn:aws:s3:::*"]}]} -``` - -*Example: Attach the canned policy.'writeonly' on a user or group* - -```sh -mc admin policy attach myminio/ writeonly user=someuser -Policy `writeonly` successfully attached to user `someuser` -``` - -*Example: Detach the canned policy.'writeonly' on a user or group* - -```sh -mc admin policy detach myminio/ writeonly group=somegroup -Policy `writeonly` successfully detached from group `somegroup` -``` - - -### Command `user` - Manage users -`user` command to add, remove, enable, disable, list users on MinIO server. - -```sh -NAME: - mc admin user - manage users - -FLAGS: - --help, -h show help - -COMMANDS: - add add a new user - disable disable user - enable enable user - remove remove user - list list all users - info display info of a user - policy export user policies in JSON format - svcacct manage service accounts - sts manage STS accounts -``` - -*Example: Add a new user 'newuser' on MinIO.* - -```sh -mc admin user add myminio/ newuser newuser123 -``` - -*Example: Add a new user 'newuser' on MinIO, using standard input.* - -```sh -mc admin user add myminio/ -Enter Access Key: newuser -Enter Secret Key: newuser123 -``` - -*Example: Disable a user 'newuser' on MinIO.* - -```sh -mc admin user disable myminio/ newuser -``` - -*Example: Enable a user 'newuser' on MinIO.* - -```sh -mc admin user enable myminio/ newuser -``` - -*Example: Remove user 'newuser' on MinIO.* - -```sh -mc admin user remove myminio/ newuser -``` - -*Example: List all users on MinIO.* - -```sh -mc admin user list --json myminio/ -{"status":"success","accessKey":"newuser","userStatus":"enabled"} -``` - -*Example: Display info of a user* - -```sh -mc admin user info myminio someuser -``` - - -### Command `replicate` - manage MinIO site replication -`replicate` command to add, update, rm sites for replication. 
- -```sh -NAME: - mc admin replicate - manage MinIO site replication - -FLAGS: - --help, -h show help - -COMMANDS: - add add one or more sites for replication - update modify endpoint of site participating in site replication - rm remove one or more sites from site replication - info get site replication information - status display site replication status - resync resync content to site -``` - -*Example: Add a site for cluster-level replication.* - -```sh -mc admin replicate add minio1 minio2 -``` - -*Example: Edit a site endpoint participating in cluster-level replication.* - -```sh -mc admin replicate update myminio --deployment-id c1758167-4426-454f-9aae-5c3dfdf6df64 --endpoint https://minio2:9000 -``` - -*Example: Remove site replication for all sites.* - -```sh -mc admin replicate rm minio2 --all --force -``` - -*Example: Remove site replication for site with site names alpha, baker from active cluster minio2.* - -```sh -mc admin replicate rm minio2 alpha baker --force -``` - -*Example: Get Site Replication information.* - -```sh -mc admin replicate info minio1 -``` - -*Example: Display overall site replication status.* - -```sh -mc admin replicate status minio1 -``` - -*Example: Resync bucket data from minio1 to minio2.* - -```sh -mc admin replicate resync start minio1 minio2 -``` - -*Example: Display status of resync from minio1 to minio2.* - -```sh -mc admin replicate resync status minio1 minio2 -``` - -*Example: Cancel ongoing resync of bucket data from minio1 to minio2.* - -```sh -mc admin replicate resync cancel minio1 minio2 -``` - - - -### Command `group` - Manage groups -`group` command to add, remove, info, list, enable, disable groups on MinIO server. - -```sh -NAME: - mc admin group - manage groups - -USAGE: - mc admin group COMMAND [COMMAND FLAGS | -h] [ARGUMENTS...] - -COMMANDS: - add add users to a new or existing group - remove remove group or members from a group - info display group info - list display list of groups - enable Enable a group - disable Disable a group -``` - -*Example: Add a pair of users to a group 'somegroup' on MinIO.* - -Group is created if it does not exist. - -```sh -mc admin group add myminio somegroup someuser1 someuser2 -``` - -*Example: Remove a pair of users from a group 'somegroup' on MinIO.* - -```sh -mc admin group remove myminio somegroup someuser1 someuser2 -``` - -*Example: Remove a group 'somegroup' on MinIO.* - -Only works if the given group is empty. - -```sh -mc admin group remove myminio somegroup -``` - -*Example: Get info on a group 'somegroup' on MinIO.* - -```sh -mc admin group info myminio somegroup -``` - -*Example: List all groups on MinIO.* - -```sh -mc admin group list myminio -``` - -*Example: Enable a group 'somegroup' on MinIO.* - -```sh -mc admin group enable myminio somegroup -``` - -*Example: Disable a group 'somegroup' on MinIO.* - -```sh -mc admin group disable myminio somegroup -``` - - -### Command `config` - Manage server configuration -`config` command to manage MinIO server configuration. - -```sh -NAME: - mc admin config - manage configuration file - -USAGE: - mc admin config COMMAND [COMMAND FLAGS | -h] [ARGUMENTS...] 
- -COMMANDS: - get interactively retrieve a config key parameters - set interactively set a config key parameters - reset interactively reset a config key parameters - history show all historic configuration changes - restore rollback back changes to a specific config history - export export all config keys to STDOUT - import import multiple config keys from STDIN - -FLAGS: - --help, -h show help -``` - -*Example: Get 'etcd' sub-system configuration.* - -```sh -mc admin config get myminio etcd -etcd endpoints= path_prefix= coredns_path=/skydns client_cert= client_cert_key= -``` - -*Example: Set specific settings on 'etcd' sub-system.* -```sh -mc admin config set myminio etcd endpoints=http://etcd.svc.cluster.local:2379 -``` - -*Example: Get entire server configuration of a MinIO server/cluster.* - -```sh -mc admin config export myminio > /tmp/my-serverconfig -``` - -*Example: Set entire server configuration of a MinIO server/cluster.* - -```sh -mc admin config import myminio < /tmp/my-serverconfig -``` - - -### Command `decommission` - Manage MinIO server pool decommissioning -`decommission` manage MinIO server pool decommissioning. - -```sh -NAME: - mc admin decommission - manage MinIO server pool decommissioning - -USAGE: - mc admin decommission COMMAND [COMMAND FLAGS | -h] [ARGUMENTS...] - -COMMANDS: - start start decommissioning a pool - status show current decommissioning status - cancel cancel an ongoing decommissioning of a pool - -FLAGS: - --help, -h show help -``` - -*Example: Start decommissioning a pool for removal.* - -```sh -mc admin decommission start myminio/ http://server{5...8}/disk{1...4} -``` - -*Example: Show current decommissioning status.* -```sh -mc admin decommission status myminio/ http://server{5...8}/disk{1...4} -``` - -*Example: List all current decommissioning status of all pools.* - -```sh -mc admin decommission status myminio/ -``` - -*Example: Cancel an ongoing decommissioning of a pool.* - -```sh -mc admin decommission cancel myminio/ http://server{5...8}/disk{1...4} -``` - -*Example: Cancel all decommissioning of a pool.* - -```sh -mc admin decommission cancel myminio/ -``` - - -### Command `heal` - monitor healing of bucket(s) and object(s) on MinIO Server -Healing is automatic on server side which runs on a continuous basis on a low priority thread, this -command allows you to monitor the running heals on the server side. - -```sh -NAME: - mc admin heal - monitor healing of bucket(s) and object(s) on MinIO Server - -USAGE: - mc admin heal [FLAGS] TARGET - -FLAGS: - --help, -h show help -``` - -*Example: Monitor healing status on a running server at alias 'myminio'.* - -```sh -mc admin heal myminio/ -``` - - -### Command `trace` - Show HTTP call trace for all incoming and internode on MinIO -`trace` command displays server HTTP trace of one or all MinIO servers (under distributed cluster) - -```sh -NAME: - mc admin trace - Show HTTP call trace for all incoming and internode on MinIO - -FLAGS: - --verbose, -v print verbose trace - --all, -a trace all call types - --call value trace only matching call types. See CALL TYPES below for list. 
(default: s3) - --status-code value trace only matching status code - --method value trace only matching HTTP method - --funcname value trace only matching func name - --path value trace only matching path - --node value trace only matching servers - --request-header value trace only matching request headers - --errors, -e trace only failed requests - --filter-request trace calls only with request bytes greater than this threshold, use with filter-size - --filter-response trace calls only with response bytes greater than this threshold, use with filter-size - --response-duration 5ms trace calls only with response duration greater than this threshold (e.g. 5ms) (default: 0s) - --filter-size value filter size, use with filter (see UNITS) - --help, -h show help - -CALL TYPES: - batch-keyrotation: Trace Batch KeyRotation (alias: brot) - batch-replication: Trace Batch Replication (alias: brep) - bootstrap: Trace Bootstrap operations - decommission: Trace Decommission operations (alias: decom) - ftp: Trace FTP operations - healing: Trace Healing operations (alias: heal) - ilm: Trace ILM operations - internal: Trace Internal RPC calls - os: Trace Operating System calls - rebalance: Trace Server Pool Rebalancing operations - replication-resync: Trace Replication Resync operations (alias: resync) - s3: Trace S3 API calls - scanner: Trace Scanner calls - storage: Trace Storage calls - -UNITS - --filter-size flags use with --filter-response or --filter-request accept human-readable case-insensitive number - suffixes such as "k", "m", "g" and "t" referring to the metric units KB, - MB, GB and TB respectively. Adding an "i" to these prefixes, uses the IEC - units, so that "gi" refers to "gibibyte" or "GiB". A "b" at the end is - also accepted. Without suffixes the unit is bytes. - -``` - -*Example: Display MinIO server http trace.* - -```sh -mc admin trace myminio -172.16.238.1 [REQUEST (objectAPIHandlers).ListBucketsHandler-fm] [154828542.525557] [2019-01-23 23:17:05 +0000] -172.16.238.1 GET / -172.16.238.1 Host: 172.16.238.3:9000 -172.16.238.1 X-Amz-Date: 20190123T231705Z -172.16.238.1 Authorization: AWS4-HMAC-SHA256 Credential=minio/20190123/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=8385097f264efaf1b71a9b56514b8166bb0a03af8552f83e2658f877776c46b3 -172.16.238.1 User-Agent: MinIO (linux; amd64) minio-go/v7.0.8 mc/2019-01-23T23:15:38Z -172.16.238.1 X-Amz-Content-Sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 -172.16.238.1 -172.16.238.1 -172.16.238.1 [RESPONSE] [154828542.525557] [2019-01-23 23:17:05 +0000] -172.16.238.1 200 OK -172.16.238.1 X-Amz-Request-Id: 157C9D641F42E547 -172.16.238.1 X-Minio-Deployment-Id: 5f20fd91-6880-455f-a26d-07804b6821ca -172.16.238.1 X-Xss-Protection: 1; mode=block -172.16.238.1 Accept-Ranges: bytes -172.16.238.1 Content-Security-Policy: block-all-mixed-content -172.16.238.1 Content-Type: application/xml -172.16.238.1 Server: MinIO/RELEASE.2019-09-05T23-24-38Z -172.16.238.1 Vary: Origin -... 
-``` - -*Example: Show verbose console trace for MinIO server.* - -```sh - mc admin trace -v -a myminio -``` - -*Example: Show trace only for failed requests for MinIO server.* - -```sh - mc admin trace -v -e myminio -``` - -*Example: Show verbose console trace for requests with '503' status code.* - -```sh - mc admin trace -v --status-code 503 myminio -``` - -*Example: Show console trace for a specific path.* - -```sh - mc admin trace --path my-bucket/my-prefix/* myminio -``` - -*Example: Show console trace for requests with '404' and '503' status code.* - -```sh - mc admin trace --status-code 404 --status-code 503 myminio -``` - -*Example: Show trace only for requests bytes greater than 1MB.* - -```sh - mc admin trace --filter-request --filter-size 1MB myminio -``` - -*Example: Show trace only for response bytes greater than 1MB.* - -```sh - mc admin trace --filter-response --filter-size 1MB myminio -``` - -*Example: Show trace only for requests operations duration greater than 5ms.* - -```sh - mc admin trace --response-duration 5ms myminio -``` - - -### Command `scanner` - Provide MinIO scanner info -`scanner` provide MinIO scanner info. - -```sh -NAME: - mc admin scanner - provide MinIO scanner info - -FLAGS: - --help, -h show help -``` - -*Example: Show scanner trace for MinIO server.* - -```sh - mc admin scanner trace myminio -``` - -*Example: Display current in-progress all scanner operations.* - -```sh - mc admin scanner status myminio/ -``` - - -### Command `console` - show console logs for MinIO server -This command is deprecated and will be removed in a future release. Use 'mc support logs show' instead. - - -### Command `logs` - Show MinIO logs -`logs` show console logs for MinIO server. - -```sh -NAME: - mc admin logs - show MinIO logs -USAGE: - mc admin logs [FLAGS] TARGET [NODENAME] - -FLAGS: - --last value, -l value show last n log entries (default: 10) - --type value, -t value list error logs by type. Valid options are '[minio, application, all]' (default: "all") - --help, -h show help -``` - -*Example: Show logs for a MinIO server with alias 'myminio'.* - -```sh - mc admin logs myminio -``` - -*Example: Show last 5 log entries for node 'node1' for a MinIO server with alias 'myminio'.* - -```sh - mc admin logs --last 5 myminio node1 -``` - -*Example: Show application errors in logs for a MinIO server with alias 'myminio'.* - -```sh - mc admin logs --type application myminio -``` - - -### Command `cluster` - Manage MinIO cluster metadata -`cluster` manage MinIO cluster metadata. - -```sh -NAME: - mc admin cluster - manage MinIO cluster metadata - -USAGE: - mc admin cluster COMMAND [COMMAND FLAGS | -h] [ARGUMENTS...] - -FLAGS: - --help, -h show help -``` - -*Example: Recover bucket metadata for all buckets from previously saved bucket metadata backup.* - -```sh - mc admin cluster bucket import myminio /backups/cluster-metadata.zip -``` - -*Example: Save metadata of all buckets to a zip file.* - -```sh - mc admin cluster bucket export myminio -``` - -*Example: Set IAM info from previously exported metadata zip file.* - -```sh - mc admin cluster iam import myminio /tmp/myminio-iam-info.zip -``` - -*Example: Download all IAM metadata for cluster into zip file.* - -```sh - mc admin cluster iam export myminio -``` - - -### Command `rebalance` - Manage MinIO rebalance -`rebalance` manage MinIO rebalance. - -```sh -NAME: - mc admin rebalance - Manage MinIO rebalance - -USAGE: - mc admin rebalance COMMAND [COMMAND FLAGS | -h] [ARGUMENTS...] 
- -FLAGS: - --help, -h show help -``` - -*Example: Start rebalance on a MinIO deployment with alias myminio.* - -```sh - mc admin rebalance start myminio -``` - -*Example: Stop an ongoing rebalance on a MinIO deployment with alias myminio.* - -```sh - mc admin rebalance stop myminio -``` - -*Example: Summarize ongoing rebalance on a MinIO deployment with alias myminio.* - -```sh - mc admin rebalance status myminio -``` - - - -### Command `prometheus` - Manages prometheus config settings - -`generate` command generates the prometheus config (To be pasted in `prometheus.yml`) -`metrics` command print cluster wide prometheus metrics - -```sh -NAME: - mc admin prometheus - manages prometheus config - -USAGE: - mc admin prometheus COMMAND [COMMAND FLAGS | -h] [ARGUMENTS...] - -COMMANDS: - generate generates prometheus config - -``` - -_Example: Generates prometheus config for an ._ - -```sh -mc admin prometheus generate -- job_name: minio-job - bearer_token: - metrics_path: /minio/v2/metrics/cluster - scheme: http - static_configs: - - targets: ['localhost:9000'] -``` - - - -### Command `kms` - perform KMS management operations - -The `kms` command can be used to perform KMS management operations. - -```sh -NAME: - mc admin kms - perform KMS management operations - -USAGE: - mc admin kms COMMAND [COMMAND FLAGS | -h] [ARGUMENTS...] -``` - -The `key` sub-command can be used to perform master key management operations. - -```sh -NAME: - mc admin kms key - manage KMS keys - -USAGE: - mc admin kms key COMMAND [COMMAND FLAGS | -h] [ARGUMENTS...] -``` - -*Example: Display status information for the default master key* - -```sh -mc admin kms key status play -Key: my-minio-key - • Encryption ✔ - • Decryption ✔ -``` - -*Example: Create a new master key at the KMS* - -```sh -mc admin kms key create play my-key - -Created master key `my-key` successfully -``` - -*Example: Display status information for one particular master key* - -```sh -mc admin kms key status play my-key -Key: my-key - • Encryption ✔ - • Decryption ✔ -``` - - -This command is deprecated and will be removed in a future release. Use 'mc quota set|info|clear' instead. - -### Command `remote` - configure remote target buckets -`remote` command manages remote bucket targets on MinIO server. - -```sh -NAME: - mc admin bucket remote - configure remote bucket targets - -This command is deprecated and will be removed in a future release. Use 'mc replicate add|update|rm` commands to manage remote targets diff --git a/docs/minio-client-complete-guide.md b/docs/minio-client-complete-guide.md deleted file mode 100644 index 5ffa1a1c9f..0000000000 --- a/docs/minio-client-complete-guide.md +++ /dev/null @@ -1,2305 +0,0 @@ -# MinIO Client Complete Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) - -MinIO Client (mc) provides a modern alternative to UNIX commands like ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage service (AWS Signature v2 and v4). 
- -``` -alias set, remove and list aliases in configuration file -ls list buckets and objects -mb make a bucket -rb remove a bucket -cp copy objects -mirror synchronize object(s) to a remote site -cat display object contents -head display first 'n' lines of an object -pipe stream STDIN to an object -share generate URL for temporary access to an object -find search for objects -sql run sql queries on objects -stat show object metadata -mv move objects -tree list buckets and objects in a tree format -du summarize disk usage recursively -retention set retention for object(s) and bucket(s) -legalhold set legal hold for object(s) -diff list differences in object name, size, and date between two buckets -rm remove objects -version manage bucket versioning -ilm manage bucket lifecycle -encrypt manage bucket encryption config -event manage object notifications -watch listen for object notification events -undo undo PUT/DELETE operations -anonymous manage anonymous access to buckets and objects -tag manage tags for bucket(s) and object(s) -replicate configure server side bucket replication -admin manage MinIO servers -update update mc to latest release -support supportability tools like profile, register, callhome, inspect -ping perform liveness check -quota manage bucket quota -batch manage batch jobs -get get s3 object to local -od measure single stream upload and download -put upload an object to a bucket -ready checks if the cluster is ready or not -``` - -## 1. Download MinIO Client -### Docker Stable -```sh -docker pull minio/mc -docker run minio/mc ls play -``` - -### Docker Edge -```sh -docker pull minio/mc:edge -docker run minio/mc:edge ls play -``` - -**Note:** Above examples run `mc` against MinIO [_play_ environment](#test-your-setup) by default. To run `mc` against other S3 compatible servers, start the container this way: - -```sh -docker run -it --entrypoint=/bin/sh minio/mc -``` - -then use the [`mc alias` command](#3-add-a-cloud-storage-service). - -### Homebrew (macOS) -Install mc packages using [Homebrew](http://brew.sh/) - -```sh -brew install minio/stable/mc -mc --help -``` - -### Binary Download (GNU/Linux) -| Platform | Architecture | URL | -| ---------- | -------- |------| -|GNU/Linux|64-bit Intel|https://dl.min.io/client/mc/release/linux-amd64/mc | -||64-bit PPC|https://dl.min.io/client/mc/release/linux-ppc64le/mc | - -```sh -chmod +x mc -./mc --help -``` - -### Binary Download (Microsoft Windows) -| Platform | Architecture | URL | -| ---------- | -------- |------| -|Microsoft Windows|64-bit Intel|https://dl.min.io/client/mc/release/windows-amd64/mc.exe | - -``` -mc.exe --help -``` - -### Install from Source -Source installation is intended only for developers and advanced users. `mc update` command does not support update notifications for source based installations. Please download official releases from https://min.io/download/#minio-client. - -If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). - -```sh -go get -d github.com/minio/mc -cd ${GOPATH}/src/github.com/minio/mc -make -``` - -## 2. Run MinIO Client - -### GNU/Linux - -```sh -chmod +x mc -./mc --help -``` - -### macOS - -```sh -chmod 755 mc -./mc --help -``` - -### Microsoft Windows - -``` -mc.exe --help -``` - -## 3. Add a Cloud Storage Service -Note: If you are planning to use `mc` only on POSIX compatible filesystems, you may skip this step and proceed to **Step 4**. 
- -To add one or more Amazon S3 compatible hosts, please follow the instructions below. `mc` stores all its configuration information in ``~/.mc/config.json`` file. - -#### Usage - -```sh -mc alias set [YOUR-ACCESS-KEY] [YOUR-SECRET-KEY] [--api API-SIGNATURE] -``` - -Keys must be supplied by argument or standard input. - -Alias is simply a short name to your cloud storage service. S3 end-point, access and secret keys are supplied by your cloud storage provider. API signature is an optional argument. By default, it is set to "S3v4". - -### Example - MinIO Cloud Storage -MinIO server displays URL, access and secret keys. - - -```sh -mc alias set minio http://192.168.1.51 BKIKJAA5BMMU2RHO6IBB V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12 --api S3v4 -``` - -### Example - Amazon S3 Cloud Storage -Get your AccessKeyID and SecretAccessKey by following [AWS Credentials Guide](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSGettingStartedGuide/AWSCredentials.html). - -```sh -mc alias set s3 https://s3.amazonaws.com BKIKJAA5BMMU2RHO6IBB V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12 --api S3v4 -``` - -### Example - Google Cloud Storage -Get your AccessKeyID and SecretAccessKey by following [Google Credentials Guide](https://cloud.google.com/storage/docs/migrating?hl=en#keys) - -```sh -mc alias set gcs https://storage.googleapis.com BKIKJAA5BMMU2RHO6IBB V8f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12 -``` - -### Example - IBM Cloud Object Storage -Get your AccessKeyID and SecretAccessKey by creating a service account [with HMAC credentials](https://cloud.ibm.com/docs/cloud-object-storage?topic=cloud-object-storage-uhc-hmac-credentials-main). This option is only available from **Resources > Cloud Object Storage > Service credentials** (not from Manage > Access (IAM) > Service IDs). Once created, the values you will use for `accessKey` and `secretKey` are found in the `cos_hmac_keys` field of the service credentials. - -Finally, the url will be the **public endpoint specific to the region/resiliency** that you chose when setting up your bucket. There is no single, global url for all buckets. Find your bucket's URL in the console by going to Cloud Object Storage > Buckets > [your-bucket] > Configuration > Endpoints > public. Remember to prepend `https://` to the URL provided. - -```sh -mc alias set ibm https://s3.us-east.cloud-object-storage.appdomain.cloud BKIKJAA5BMMU2RHO6IBB V8f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12 --api s3v4 -``` - -**Note**: The service ID you create must have an access policy granting it access to your Object Storage instance(s). 
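-
-### Example - Verify configured aliases
-After adding an alias, you can confirm that it was saved by listing the aliases known to `mc`. This is a minimal sketch assuming the alias name `minio` from the MinIO example above; run the command without an argument to list every configured alias.
-
-```sh
-# Show the stored endpoint and API signature for the 'minio' alias
-mc alias list minio
-```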
- -### Example - Specify keys using standard input - -#### Prompt - -```sh -mc alias set minio http://192.168.1.51 --api S3v4 -Enter Access Key: BKIKJAA5BMMU2RHO6IBB -Enter Secret Key: V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12 -``` - -#### Pipe from STDIN - -```sh -echo -e "BKIKJAA5BMMU2RHO6IBB\nV7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12" | \ -mc alias set minio http://192.168.1.51 --api S3v4 -``` - -### Specify temporary host configuration through environment variable - -#### Static credentials -``` -export MC_HOST_=https://:@ -``` - -Example: -```sh -export MC_HOST_myalias=https://Q3AM3UQ867SPQQA43P2F:zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG@play.min.io -mc ls myalias -``` - -#### Rotating credentials -``` -export MC_HOST_=https://::@ -``` - -Example: -```sh -export MC_HOST_myalias=https://Q3AM3UQ867SPQQA43P2F:zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG:eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJhY2Nlc3NLZXkiOiJOVUlCT1JaWVRWMkhHMkJNUlNYUiIsImF1ZCI6IlBvRWdYUDZ1Vk80NUlzRU5SbmdEWGo1QXU1WWEiLCJhenAiOiJQb0VnWFA2dVZPNDVJc0VOUm5nRFhqNUF1NVlhIiwiZXhwIjoxNTM0ODk2NjI5LCJpYXQiOjE1MzQ4OTMwMjksImlzcyI6Imh0dHBzOi8vbG9jYWxob3N0Ojk0NDMvb2F1dGgyL3Rva2VuIiwianRpIjoiNjY2OTZjZTctN2U1Ny00ZjU5LWI0MWQtM2E1YTMzZGZiNjA4In0.eJONnVaSVHypiXKEARSMnSKgr-2mlC2Sr4fEGJitLcJF_at3LeNdTHv0_oHsv6ZZA3zueVGgFlVXMlREgr9LXA@play.min.io -mc ls myalias -``` - - -## 4. Test Your Setup -`mc` is pre-configured with https://play.min.io, aliased as "play". It is a hosted MinIO server for testing and development purpose. To test Amazon S3, simply replace "play" with "s3" or the alias you used at the time of setup. - -*Example:* - -List all buckets from https://play.min.io - -```sh -mc ls play -[2016-03-22 19:47:48 PDT] 0B my-bucketname/ -[2016-03-22 22:01:07 PDT] 0B mytestbucket/ -[2016-03-22 20:04:39 PDT] 0B mybucketname/ -[2016-01-28 17:23:11 PST] 0B newbucket/ -[2016-03-20 09:08:36 PDT] 0B s3git-test/ -``` - -## 5. Everyday Use -You may add shell aliases to override your common Unix tools. - -```sh -alias ls='mc ls' -alias cp='mc cp' -alias cat='mc cat' -alias mkdir='mc mb' -alias pipe='mc pipe' -alias find='mc find' -alias tree='mc tree' -``` - -## 6. Global Options - -### Option [--autocompletion] -Install auto-completion for your shell. - -### Option [--debug] -Debug option enables debug output to console. - -*Example: Display verbose debug output for `ls` command.* - -```sh -mc --debug ls play -mc: GET / HTTP/1.1 -Host: play.min.io -User-Agent: MinIO (darwin; amd64) minio-go/1.0.1 mc/2016-04-01T00:22:11Z -Authorization: AWS4-HMAC-SHA256 Credential=**REDACTED**/20160408/us-east-1/s3/aws4_request, SignedHeaders=expect;host;x-amz-content-sha256;x-amz-date, Signature=**REDACTED** -Expect: 100-continue -X-Amz-Content-Sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 -X-Amz-Date: 20160408T145236Z -Accept-Encoding: gzip - -mc: HTTP/1.1 200 OK -Transfer-Encoding: chunked -Accept-Ranges: bytes -Content-Type: text/xml; charset=utf-8 -Date: Fri, 08 Apr 2016 14:54:55 GMT -Server: MinIO/DEVELOPMENT.2016-04-07T18-53-27Z (linux; amd64) -Vary: Origin -X-Amz-Request-Id: HP30I0W2U49BDBIO - -mc: Response Time: 1.220112837s - -[...] - -[2016-04-08 03:56:14 IST] 0B albums/ -[2016-04-04 16:11:45 IST] 0B backup/ -[2016-04-01 20:10:53 IST] 0B deebucket/ -[2016-03-28 21:53:49 IST] 0B guestbucket/ -``` - -### Option [--json] -JSON option enables parseable output in [JSON lines](http://jsonlines.org/), also called as [NDJSON](http://ndjson.org/) format. 
- -*Example: List all buckets from MinIO play service.* - -```sh -mc --json ls play -{"status":"success","type":"folder","lastModified":"2016-04-08T03:56:14.577+05:30","size":0,"key":"albums/"} -{"status":"success","type":"folder","lastModified":"2016-04-04T16:11:45.349+05:30","size":0,"key":"backup/"} -{"status":"success","type":"folder","lastModified":"2016-04-01T20:10:53.941+05:30","size":0,"key":"deebucket/"} -{"status":"success","type":"folder","lastModified":"2016-03-28T21:53:49.217+05:30","size":0,"key":"guestbucket/"} -``` - -### Option [--no-color] -This option disables the color theme. It is useful for dumb terminals. - -### Option [--quiet] -Quiet option suppress chatty console output. - -### Option [--config-dir] -Use this option to set a custom config path. - -### Option [ --insecure] -Skip SSL certificate verification. - -### Option [--version] -Display the current version of `mc` installed - -### Option [--limit-upload] -limits uploads to a maximum rate in KiB/s, MiB/s, GiB/s. (default: unlimited) - -### Option [--limit-download] -limits downloads to a maximum rate in KiB/s, MiB/s, GiB/s. (default: unlimited) - -*Example: Print version of mc.* - -```sh -mc --version -mc version RELEASE.2020-04-25T00-43-23Z -``` - -## 7. Commands - -| | | | | -|:------------------------------------------------------------------------------------------------------|:--------------------------------------------------------------------|:-------------------------------------------------------------------------|----------------------------------------------------| -| [**ls** - list buckets and objects](#ls) | [**tree** - list buckets and objects in a tree format](#tree) | [**mb** - make a bucket](#mb) | [**cat** - display object contents](#cat) | -| [**cp** - copy objects](#cp) | [**rb** - remove a bucket](#rb) | [**pipe** - stream STDIN to an object](#pipe) | [**version** - manage bucket version](#version) | -| [**share** - generate URL for temporary access to an object](#share) | [**rm** - remove objects](#rm) | [**find** - find files and objects](#find) | [**undo** - undo PUT/DELETE operations](#undo) | -| [**diff** - list differences in object name, size, and date between two buckets](#diff) | [**mirror** - synchronize object(s) to a remote site](#mirror) | [**ilm** - manage bucket lifecycle policies](#ilm) | [**replicate** - manage bucket server side replication](#replicate) | -| [**alias** - manage aliases](#alias) | [**anonymous** - set public policy on bucket or prefix](#anonymous) | [**event** - manage events on your buckets](#event) | [**encrypt** - manage bucket encryption](#encrypt) | -| [**update** - manage software updates](#update) | [**watch** - watch for events](#watch) | [**retention** - set retention for object(s)](#retention) | [**sql** - run sql queries on objects](#sql) | -| [**head** - display first 'n' lines of an object](#head) | [**stat** - stat contents of objects and folders](#stat) | [**legalhold** - set legal hold for object(s)](#legalhold) | [**mv** - move objects](#mv) | -| [**du** - summarize disk usage recursively](#du) | [**tag** - manage tags for bucket and object(s)](#tag) | [**admin** - manage MinIO servers](#admin) | [**support** - generate profile data for debugging purposes](#support) | -| [**ping** - perform liveness check](#ping) | [**batch** - manage batch jobs](#batch) | [**get** - get s3 object to local](#get) | [**put** - upload an object to a bucket](#put) | -| [**od** - measure single stream upload and download](#od) | [**ready** - checks if the 
cluster is ready or not](#ready) | | | - - - -### Command `ls` -`ls` command lists files, buckets and objects. Use `--incomplete` flag to list partially copied content. - -```sh -USAGE: - mc ls [FLAGS] TARGET [TARGET ...] - -FLAGS: - --rewind value list all object versions no later than specified date - --versions list all versions - --recursive, -r list recursively - --incomplete, -I list incomplete uploads - --help, -h show help -``` - -*Example: List all buckets on https://play.min.io.* - -```sh -mc ls play -[2016-04-08 03:56:14 IST] 0B albums/ -[2016-04-04 16:11:45 IST] 0B backup/ -[2016-04-01 20:10:53 IST] 0B deebucket/ -[2016-03-28 21:53:49 IST] 0B guestbucket/ -[2016-04-08 20:58:18 IST] 0B mybucket/ -``` - -*Example: List all contents versions if the bucket versioning is enabled* -```sh -mc ls --versions s3/mybucket -[2020-09-21 16:25:31 CET] 903KiB UiL4wSZS2OkST5aJ3AFAwtzZxHTW_9VC v1 PUT foo -[2020-09-18 21:18:44 CET] 0B sK4pldVmOJqCJzX2aJvxX4eWMnuqazs9 v1 DEL bar -``` - -*Example: List contents created earlier than 3 days* -```sh -mc ls --rewind 3d s3/mybucket -[2020-09-18 21:18:44 CET] 0B sK4pldVmOJqCJzX2aJvxX4eWMnuqazs9 v1 DEL bar -``` - - -### Command `tree` - -`tree` command lists buckets and directories in a tree format. Use `--files` flag to include files/objects in listing. - -```sh -USAGE: - mc tree [FLAGS] TARGET [TARGET ...] - -FLAGS: - --help, -h show help - --files, -f include files in tree - --depth, -d set the maximum depth of the tree - --rewind value display tree no later than specified date -``` - -_Example: List all contents on play/test-bucket in a tree format._ - -```sh -mc tree play/test-bucket -play/test-bucket/ -├─ dir_a -├─ dir_b -│ └─ dir_bb -└─ dir_x - └─ dir_xx -``` - -*Example: List all objects with the state of 3 days earlier* -```sh -mc tree --files --rewind 3d play/test-bucket -play/test-bucket/ -├─ object1 -└─ object2 -``` - - -### Command `mb` -`mb` command creates a new bucket on an object storage. On a filesystem, it behaves like `mkdir -p` command. Bucket is equivalent of a drive or mount point in filesystems and should not be treated as folders. MinIO does not place any limits on the number of buckets created per user. -On Amazon S3, each account is limited to 100 buckets. Please refer to [Buckets Restrictions and Limitations on S3](http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html) for more information. - -```sh -USAGE: - mc mb [FLAGS] TARGET [TARGET...] - -FLAGS: - --region value specify bucket region; defaults to 'us-east-1' (default: "us-east-1") - --ignore-existing, -p ignore if bucket/directory already exists - --with-lock, -l enable object lock - --help, -h show help - -``` - -*Example: Create a new bucket named "mybucket" on https://play.min.io.* - - -```sh -mc mb play/mybucket -Bucket created successfully ‘play/mybucket’. -``` - -*Example: Create a new bucket named "mybucket" on https://s3.amazonaws.com.* - - -```sh -mc mb s3/mybucket --region=us-west-1 -Bucket created successfully ‘s3/mybucket’. -``` - - -### Command `rb` -`rb` command removes a bucket and all its contents on an object storage. On a filesystem, it behaves like `rmdir` command. - -> NOTE: When a bucket is removed all bucket configurations associated with the bucket will also be removed. All objects and their versions will be removed as well. If you need to preserve bucket and its configuration - only empty the objects and versions in a bucket use `mc rm` instead. - -```sh -USAGE: - mc rb [FLAGS] TARGET [TARGET...] 
- -FLAGS: - --force force a recursive remove operation on all object versions - --dangerous allow site-wide removal of objects - --help, -h show help - -``` - -*Example: Remove a bucket named "mybucket" on https://play.min.io.* - - -```sh -mc rb play/mybucket --force -Bucket removed successfully ‘play/mybucket’. -``` - - -### Command `get` -`get` get s3 object to local. - -```sh -USAGE: - mc get [FLAGS] SOURCE TARGET - -FLAGS: - --encrypt-key value encrypt/decrypt objects (using server-side encryption with customer provided keys) [$MC_ENCRYPT_KEY] - --encrypt value encrypt/decrypt objects (using server-side encryption with server managed keys) [$MC_ENCRYPT] - --config-dir value, -C value path to configuration folder (default: "/root/.mc") [$MC_CONFIG_DIR] - --quiet, -q disable progress bar display [$MC_QUIET] - --no-color disable color theme [$MC_NO_COLOR] - --json enable JSON lines formatted output [$MC_JSON] - --debug enable debug output [$MC_DEBUG] - --insecure disable SSL certificate verification [$MC_INSECURE] - --limit-upload value limits uploads to a maximum rate in KiB/s, MiB/s, GiB/s. (default: unlimited) [$MC_LIMIT_UPLOAD] - --limit-download value limits downloads to a maximum rate in KiB/s, MiB/s, GiB/s. (default: unlimited) [$MC_LIMIT_DOWNLOAD] - --help, -h show help - -ENVIRONMENT VARIABLES: - MC_ENCRYPT: list of comma delimited prefixes - MC_ENCRYPT_KEY: list of comma delimited prefix=secret values -``` - -*Example: Get an object from S3 storage to local file system* - - -```sh -mc get myminio/mybucket/myobject ./myobject -...mybucket/myobject: 36.73 MiB / ? ━┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉━┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉┉━ 92.64 MiB/s 0s -``` - - -### Command `put` -`put` upload an object to a bucket. - -```sh -USAGE: - mc put [FLAGS] SOURCE TARGET - -FLAGS: - --encrypt-key value encrypt/decrypt objects (using server-side encryption with customer provided keys) [$MC_ENCRYPT_KEY] - --encrypt value encrypt/decrypt objects (using server-side encryption with server managed keys) [$MC_ENCRYPT] - --config-dir value, -C value path to configuration folder (default: "/root/.mc") [$MC_CONFIG_DIR] - --quiet, -q disable progress bar display [$MC_QUIET] - --no-color disable color theme [$MC_NO_COLOR] - --json enable JSON lines formatted output [$MC_JSON] - --debug enable debug output [$MC_DEBUG] - --insecure disable SSL certificate verification [$MC_INSECURE] - --limit-upload value limits uploads to a maximum rate in KiB/s, MiB/s, GiB/s. (default: unlimited) [$MC_LIMIT_UPLOAD] - --limit-download value limits downloads to a maximum rate in KiB/s, MiB/s, GiB/s. (default: unlimited) [$MC_LIMIT_DOWNLOAD] - --parallel value, -P value upload number of parts in parallel (default: 4) - --part-size value, -s value each part size (default: "16MiB") - --help, -h show help - -ENVIRONMENT VARIABLES: - MC_ENCRYPT: list of comma delimited prefixes - MC_ENCRYPT_KEY: list of comma delimited prefix=secret values -``` - -*Example: Put an object from local file system to S3 bucket with name* - - -```sh -mc put ./myobject myminio/mybucket/myobject -./myobject: 36.73 MiB / 36.73 MiB ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 182.21 MiB/s 0s -``` - - -### Command `od` -`od` measure single stream upload and download. - -```sh -USAGE: - mc od [OPERANDS] - -OPERANDS: - if= source stream to upload - of= target path to upload to - size= size of each part. 
If not specified, will be calculated from the source stream size. - parts= number of parts to upload. If not specified, will calculated from the source file size. - skip= number of parts to skip. - -FLAGS: - --config-dir value, -C value path to configuration folder (default: "/root/.mc") [$MC_CONFIG_DIR] - --quiet, -q disable progress bar display [$MC_QUIET] - --no-color disable color theme [$MC_NO_COLOR] - --json enable JSON lines formatted output [$MC_JSON] - --debug enable debug output [$MC_DEBUG] - --insecure disable SSL certificate verification [$MC_INSECURE] - --limit-upload value limits uploads to a maximum rate in KiB/s, MiB/s, GiB/s. (default: unlimited) [$MC_LIMIT_UPLOAD] - --limit-download value limits downloads to a maximum rate in KiB/s, MiB/s, GiB/s. (default: unlimited) [$MC_LIMIT_DOWNLOAD] - --help, -h show help -``` - -*Example: Upload 200MiB of a file to a bucket in 5 parts of size 40MiB.* - - -```sh -mc od if=file.txt of=play/my-bucket/file.txt size=40MiB parts=5 -Transferred: 200 MiB, Parts: 5, Time: 455ms, Speed: 81 MiB/s -``` - -*Example: Upload a full file to a bucket with 40MiB parts.* - - -```sh -mc od if=file.txt of=play/my-bucket/file.txt size=40MiB -Transferred: 200 MiB, Parts: 5, Time: 455ms, Speed: 81 MiB/s -``` - - -### Command `ready` -`ready` checks if the cluster is ready or not - -```sh -USAGE: - mc ready [FLAGS] TARGET - -FLAGS: - --cluster-read check if the cluster has enough read quorum - --maintenance check if the cluster is taken down for maintenance - --config-dir value, -C value path to configuration folder (default: "/root/.mc") [$MC_CONFIG_DIR] - --quiet, -q disable progress bar display [$MC_QUIET] - --no-color disable color theme [$MC_NO_COLOR] - --json enable JSON lines formatted output [$MC_JSON] - --debug enable debug output [$MC_DEBUG] - --insecure disable SSL certificate verification [$MC_INSECURE] - --limit-upload value limits uploads to a maximum rate in KiB/s, MiB/s, GiB/s. (default: unlimited) [$MC_LIMIT_UPLOAD] - --limit-download value limits downloads to a maximum rate in KiB/s, MiB/s, GiB/s. (default: unlimited) [$MC_LIMIT_DOWNLOAD] - --help, -h show help -``` - -*Example: Check if the cluster is ready or not.* -```sh -mc ready myminio -The cluster is ready -``` - -*Example: Check if the cluster has enough read quorum* -```sh -mc ready myminio --cluster-read -The cluster is ready -``` - - -### Command `du` -`du` command summarizes disk usage recursively - -```sh -USAGE: - mc du [FLAGS] TARGET -FLAGS: - --depth value, -d value print the total for a folder prefix only if it is N or fewer levels below the command line argument (default: 0) - --recursive, -r recursively print the total for a folder prefix - --rewind value include all object versions no later than specified date - --versions include all object versions - --encrypt-key value encrypt/decrypt objects (using server-side encryption with customer provided keys) - --help, -h show help -``` - -*Example: Summarize disk usage of 'jazz-songs' bucket recursively.* -```sh -mc du s3/jazz-songs -``` - -*Example: Summarize disk usage of 'jazz-songs' bucket with all objects versions* -```sh -mc du --versions s3/jazz-songs/ -``` - - -### Command `cat` -`cat` command concatenates contents of a file or object to another. You may also use it to simply display the contents to stdout - -```sh -USAGE: - mc cat [FLAGS] SOURCE [SOURCE...] 
- -FLAGS: - --rewind value display an earlier object version - --version-id value, --vid value display a specific version of an object - --encrypt-key value encrypt/decrypt objects (using server-side encryption with customer provided keys) - --help, -h show help - -ENVIRONMENT VARIABLES: - MC_ENCRYPT_KEY: list of comma delimited prefix=secret values -``` - -*Example: Display the contents of a text file `myobject.txt`* - -```sh -mc cat play/mybucket/myobject.txt -Hello MinIO!! -``` - -*Example: Display the contents of a server encrypted object `myencryptedobject.txt`* - -```sh -mc cat --encrypt-key "play/mybucket=32byteslongsecretkeymustbegiven1" play/mybucket/myencryptedobject.txt -Hello MinIO!! -``` - -*Example: Display the contents of a server encrypted object `myencryptedobject.txt`. Pass base64 encoded string if encryption key contains non-printable character like tab* - -```sh -mc cat --encrypt-key "play/mybucket=MzJieXRlc2xvbmdzZWNyZWFiY2RlZmcJZ2l2ZW5uMjE=" play/mybucket/myencryptedobject.txt -Hello MinIO!! -``` - -*Example: Display the content of an object 10 days earlier* - -```sh -mc cat --rewind "10d" play/mybucket/myobject -Hello MinIO ten days earlier! -``` - -*Example: Display the content of an object at a specific date/time in the past* - -```sh -mc cat --rewind "2020.03.24T10:00" play/mybucket/myobject -Hello MinIO from the past! -``` - - - -### Command `sql` -`sql` run sql queries on objects. - -```sh -USAGE: - mc sql [FLAGS] TARGET [TARGET...] - -FLAGS: - --query value, -e value sql query expression - --recursive, -r sql query recursively - --csv-input value csv input serialization option - --json-input value json input serialization option - --compression value input compression type - --csv-output value csv output serialization option - --json-output value json output serialization option - --encrypt-key value encrypt/decrypt objects (using server-side encryption with customer provided keys) - --help, -h show help - -ENVIRONMENT VARIABLES: - MC_ENCRYPT_KEY: list of comma delimited prefix=secret values - -INPUT SERIALIZATION - --csv-input or --json-input can be used to specify input data format. Format is - specified by a string with pattern "key=value,..." for valid key(s). - - DATA FORMAT: - csv: Use --csv-input flag - Valid keys: - RecordDelimiter (rd) - FieldDelimiter (fd) - QuoteChar (qc) - QuoteEscChar (qec) - FileHeader (fh) - Comments (cc) - QuotedRecordDelimiter (qrd) - - json: Use --json-input flag - Valid keys: - Type - parquet: If object name ends in .parquet, this is automatically interpreted. - -OUTPUT SERIALIZATION - --csv-output or --json-output can be used to specify output data format. Format is - specified by a string with pattern "key=value,..." for valid key(s). - DATA FORMAT: - csv: Use --csv-output flag - Valid keys: - RecordDelimiter (rd) - FieldDelimiter (fd) - QuoteChar (qc) - QuoteEscChar (qec) - QuoteFields (qf) - - json: Use --json-output flag - Valid keys: - RecordDelimiter (rd) - -COMPRESSION TYPE - --compression specifies if the queried object is compressed. 
- Valid values: NONE | GZIP | BZIP2 - -``` - -*Example: Select all columns on a set of objects recursively on AWS S3* - -```sh -mc sql --recursive --query "select * from S3Object" s3/personalbucket/my-large-csvs/ -``` - -*Example: Run an aggregation query on an object on MinIO* - -```sh -mc sql --query "select count(s.power) from S3Object" myminio/iot-devices/power-ratio.csv -``` - -*Example: Run an aggregation query on an encrypted object with customer provided keys* - -```sh -mc sql --encrypt-key "myminio/iot-devices=32byteslongsecretkeymustbegiven1" \ - --query "select count(s.power) from S3Object" myminio/iot-devices/power-ratio-encrypted.csv -``` - -For more query examples refer to official AWS S3 documentation [here](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html#RESTObjectSELECTContent-responses-examples) - - -### Command `head` -`head` display first 'n' lines of an object - -```sh -USAGE: - mc head [FLAGS] SOURCE [SOURCE...] - -FLAGS: - -n value, --lines value print the first 'n' lines (default: 10) - --encrypt-key value encrypt/decrypt objects (using server-side encryption with customer provided keys) - --help, -h show help - -ENVIRONMENT VARIABLES: - MC_ENCRYPT_KEY: list of comma delimited prefix=secret values -``` - -*Example: Display the first line of a text file `myobject.txt`* - -```sh -mc head -n 1 play/mybucket/myobject.txt -Hello!! -``` - -*Example: Display the first line of a server encrypted object `myencryptedobject.txt`* - -```sh -mc head -n 1 --encrypt-key "play/mybucket=32byteslongsecretkeymustbegiven1" play/mybucket/myencryptedobject.txt -Hello!! -``` - -*Example: Display the first line of the content of an object, 1 year earlier* -```sh -mc head -n 1 --rewind 365d play/mybucket/myencryptedobject.txt -Hello!! -``` - -### Command `lock` -`lock` sets and gets object lock configuration - -> `RELEASE.2020-09-18T00-13-21Z` deprecates and removes the `lock` command. -The [retention](#retention) command fully replaces `lock` functionality. - - -### Command `retention` -`retention` sets object retention for objects with a given prefix *or* the default -retention settings for a bucket. - -```sh -USAGE: - mc retention COMMAND [FLAGS | -h] [ARGUMENTS...] - -COMMANDS: - set Sets retention for object(s) or bucket - clear Clears retention for object(s) or bucket - info Returns retention for object(s) or bucket - help, h Shows a list of commands or help for one command - -FLAGS: - --bypass bypass governance - --recursive, -r apply retention recursively - --json enable JSON formatted output - --help, -h show help -``` - -*Example: Set governance for 30 days for object `prefix` on bucket `mybucket`* - -```sh -mc retention set governance 30d myminio/mybucket/prefix -r -Object retention successfully set for objects with prefix `myminio/mybucket/prefix`. - -``` -*Objects created with prefix `prefix` in the above bucket `mybucket` cannot be deleted until the compliance period is over* - -```sh -mc cp ~/comp.csv myminio/mybucket/prefix/ -mc rm myminio/mybucket/prefix/comp.csv -Removing `myminio/mybucket/prefix/comp.csv`. 
-``` - -This creates delete marker (soft delete) you will be able to check if your objects still exist with - -```sh -mc ls --versions myminio/mybucket/prefix/comp.csv -``` - -*Example: Set compliance for 30 days as default retention setting on bucket `mybucket`* - -```sh -mc retention set --default compliance 30d myminio/mybucket -``` - -*Objects created in the above bucket `mybucket` cannot be deleted until the compliance period is over* - -```sh -mc cp ~/comp.csv myminio/mybucket/data.csv -mc rm myminio/mybucket/data.csv -Removing `myminio/mybucket/data.csv -``` - -This creates delete marker (soft delete) you will be able to check if your objects still exist with - -```sh -mc ls --versions myminio/mybucket/prefix/data.csv -``` - -*Example: Clear object retention for a specific version of a specific object* -```sh -mc retention clear myminio/mybucket/prefix/obj.csv --version-id "3Jr2x6fqlBUsVzbvPihBO3HgNpgZgAnp" -``` - -*Example: Show object retention for recursively for all versions of all objects under prefix* -```sh -mc retention info myminio/mybucket/prefix --recursive --versions -``` - - -### Command `batch` -`batch` manage batch jobs. - -```sh -NAME: - mc batch - manage batch jobs - -USAGE: - mc batch COMMAND [COMMAND FLAGS | -h] [ARGUMENTS...] - -COMMANDS: - generate generate a new batch job definition - start start a new batch job - list, ls list all current batch jobs - status summarize job events on MinIO server in real-time - describe describe job definition for a job - cancel cancel ongoing batch job - -FLAGS: - --config-dir value, -C value path to configuration folder (default: "/root/.mc") [$MC_CONFIG_DIR] - --quiet, -q disable progress bar display [$MC_QUIET] - --no-color disable color theme [$MC_NO_COLOR] - --json enable JSON lines formatted output [$MC_JSON] - --debug enable debug output [$MC_DEBUG] - --insecure disable SSL certificate verification [$MC_INSECURE] - --limit-upload value limits uploads to a maximum rate in KiB/s, MiB/s, GiB/s. (default: unlimited) [$MC_LIMIT_UPLOAD] - --limit-download value limits downloads to a maximum rate in KiB/s, MiB/s, GiB/s. (default: unlimited) [$MC_LIMIT_DOWNLOAD] - --help, -h show help -``` - -*Example: Generate a new batch `replication` job definition.* - -```sh -mc batch generate myminio replicate > replication.yaml -``` -*Example: Start a new batch 'replication' job.* - -```sh -mc batch start myminio ./replication.yaml -``` - -*Example: List all current batch jobs of type `replicate`.* - -```sh -mc batch list myminio/ --type "replicate" -``` - -*Example: Display current in-progress JOB events.* -```sh -mc batch status myminio/ KwSysDpxcBU9FNhGkn2dCf -``` - -*Example: Describe current batch job definition.* -```sh -mc batch describe myminio KwSysDpxcBU9FNhGkn2dCf -``` - -*Example: Cancel ongoing batch job.* -```sh -mc batch cancel myminio KwSysDpxcBU9FNhGkn2dCf -``` - - -### Command `legalhold` -`legalhold` sets object legal hold for objects - -```sh -USAGE: - mc legalhold COMMAND [FLAGS | -h] TARGET - -COMMANDS: - set set legal hold for object(s) - clear clear legal hold for object(s) - info show legal hold info for object(s) - help, h Shows a list of commands or help for one command - -FLAGS: - --recursive, -r apply legal hold recursively - --json enable JSON formatted output - --help, -h show help -``` - -*Example: Enable legal hold for objects with prefix `prefix` on bucket `mybucket`* - -```sh -mc legalhold set myminio/mybucket/prefix -r -Object legal hold successfully set for prefix `myminio/mybucket/prefix`. 
- -``` -*Objects created with prefix `prefix` in the above bucket `mybucket` cannot be deleted until the legal hold is lifted* - -```sh -mc cp ~/test.csv myminio/mybucket/prefix/ -mc rm myminio/mybucket/prefix/test.csv -Removing `myminio/mybucket/prefix/test.csv`. -``` - -This creates delete marker (soft delete) you will be able to check if your objects still exist with - -```sh -mc ls --versions myminio/mybucket/prefix/comp.csv -``` - -*Example: Disable legal hold on a specific object version* -```sh -mc legalhold clear myminio/mybucket/prefix/obj.csv --version-id "HiMFUTOowG6ylfNi4LKxD3ieHbgfgrvC" -``` - -*Example: Show object legal hold recursively for all objects at a prefix* -```sh -mc legalhold info myminio/mybucket/prefix --recursive -``` - - -### Command `pipe` -`pipe` command copies contents of stdin to a target. When no target is specified, it writes to stdout. - -```sh -USAGE: - mc pipe [FLAGS] [TARGET] - -FLAGS: - --encrypt value encrypt objects (using server-side encryption with server managed keys) - --encrypt-key value encrypt/decrypt objects (using server-side encryption with customer provided keys) - --help, -h show help - -ENVIRONMENT VARIABLES: - MC_ENCRYPT: list of comma delimited prefix values - MC_ENCRYPT_KEY: list of comma delimited prefix=secret values -``` - -*Example: Stream MySQL database dump to Amazon S3 directly.* - -```sh -mysqldump -u root -p ******* accountsdb | mc pipe s3/sql-backups/backups/accountsdb-oct-9-2015.sql -``` - - - -### Command `cp` -`cp` command copies data from one or more sources to a target. All copy operations to object storage are verified with MD5SUM checksums. Interrupted or failed copy operations can be resumed from the point of failure. - -```sh -USAGE: - mc cp [FLAGS] SOURCE [SOURCE...] TARGET - -FLAGS: - --rewind value roll back object(s) to current version at specified time - --version-id value, --vid value select an object version to copy - --recursive, -r copy recursively - --older-than value copy object(s) older than value in duration string (e.g. 7d10h31s) - --newer-than value copy object(s) newer than value in duration string (e.g. 7d10h31s) - --storage-class value, --sc value set storage class for new object(s) on target - --preserve,-a preserve file system attributes and bucket policy rules on target bucket(s) - --attr add custom metadata for the object (format: KeyName1=string;KeyName2=string) - --continue, -c create or resume copy session - --encrypt value encrypt/decrypt objects (using server-side encryption with server managed keys) - --encrypt-key value encrypt/decrypt objects (using server-side encryption with customer provided keys) - --tags value apply tags to the uploaded objects (eg. 
key=value&key2=value2, etc) - --help, -h show help - -ENVIRONMENT VARIABLES: - MC_ENCRYPT: list of comma delimited prefixes - MC_ENCRYPT_KEY: list of comma delimited prefix=secret values -``` - -*Example: Copy a text file to an object storage.* - -```sh -mc cp myobject.txt play/mybucket -myobject.txt: 14 B / 14 B ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ 100.00 % 41 B/s 0 -``` - -*Example: Copy a text file to an object storage with specified metadata.* - -```sh -mc cp --attr key1=value1;key2=value2 myobject.txt play/mybucket -myobject.txt: 14 B / 14 B ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ 100.00 % 41 B/s 0 -``` - -*Example: Copy a folder recursively from MinIO cloud storage to Amazon S3 cloud storage with specified metadata.* -```sh -mc cp --attr Cache-Control=max-age=90000,min-fresh=9000\;key1=value1\;key2=value2 --recursive play/mybucket/burningman2011/ s3/mybucket/ -https://play.minio.io:9000/mybucket/myobject.txt: 14 B / 14 B ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ 100.00 % 41 B/s 0 -``` - -*Example: Copy a text file to an object storage and assign storage-class `REDUCED_REDUNDANCY` to the uploaded object.* - -```sh -mc cp --storage-class REDUCED_REDUNDANCY myobject.txt play/mybucket -myobject.txt: 14 B / 14 B ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ 100.00 % 41 B/s 0 -``` - -*Example: Copy a server-side encrypted file to an object storage.* - -```sh -mc cp --recursive --encrypt-key "s3/documents/=32byteslongsecretkeymustbegiven1 , myminio/documents/=32byteslongsecretkeymustbegiven2" s3/documents/myobject.txt myminio/documents/ -myobject.txt: 14 B / 14 B ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ 100.00 % 41 B/s 0 -``` - -*Example: Perform key-rotation on a server-side encrypted object* - -```sh -mc cp --encrypt-key 'myminio1/mybucket=32byteslongsecretkeymustgenerate , myminio2/mybucket/=32byteslongsecretkeymustgenerat1' myminio1/mybucket/encryptedobject myminio2/mybucket/encryptedobject -encryptedobject: 14 B / 14 B ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ 100.00 % 41 B/s 0 -``` -Notice that two different aliases myminio1 and myminio2 are used for the same endpoint to provide the old secretkey and the newly rotated key. - -*Example: Copy a javascript file to object storage and assign Cache-Control header to the uploaded object* - -```sh -mc cp --attr Cache-Control=no-cache myscript.js play/mybucket -myscript.js: 14 B / 14 B ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ 100.00 % 41 B/s 0 -``` - -*Example: Copy a text file to an object storage and preserve the filesyatem attributes.* - -```sh -mc cp -a myobject.txt play/mybucket -myobject.txt: 14 B / 14 B ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ 100.00 % 41 B/s 0 -``` - -*Example: Roll back to object version to 10 days earlier while copying.* -```sh -mc cp --rewind 10d play/mybucket/myobject.txt myobject.txt -myobject.txt: 14 B / 14 B ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ 100.00 % 41 B/s 0 -``` - - -### Command `mv` -`mv` command moves data from one or more sources to a target. All move operations to object storage are verified with MD5SUM checksums. Interrupted or failed move operations can be resumed from the point of failure. - -```sh -USAGE: - mc mv [FLAGS] SOURCE [SOURCE...] TARGET - -FLAGS: - --recursive, -r move recursively - --older-than value move object(s) older than value in duration string (e.g. 7d10h31s) - --newer-than value move object(s) newer than value in duration string (e.g. 
7d10h31s) - --storage-class value, --sc value set storage class for new object(s) on target - --preserve,-a preserve file system attributes and bucket policy rules on target bucket(s) - --attr add custom metadata for the object (format: KeyName1=string;KeyName2=string) - --continue, -c create or resume move session - --encrypt value encrypt/decrypt objects (using server-side encryption with server managed keys) - --encrypt-key value encrypt/decrypt objects (using server-side encryption with customer provided keys) - --help, -h show help - -ENVIRONMENT VARIABLES: - MC_ENCRYPT: list of comma delimited prefixes - MC_ENCRYPT_KEY: list of comma delimited prefix=secret values -``` - -*Example: Move a text file to an object storage.* - -```sh -mc mv myobject.txt play/mybucket -myobject.txt: 14 B / 14 B ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ 100.00 % 41 B/s 0 -Waiting move operations to complete -``` - -*Example: Move a text file to an object storage with specified metadata.* - -```sh -mc mv --attr key1=value1;key2=value2 myobject.txt play/mybucket -myobject.txt: 14 B / 14 B ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ 100.00 % 41 B/s 0 -Waiting move operations to complete -``` - -*Example: Move a folder recursively from MinIO cloud storage to Amazon S3 cloud storage with specified metadata.* -```sh -mc mv --attr Cache-Control=max-age=90000,min-fresh=9000\;key1=value1\;key2=value2 --recursive play/mybucket/burningman2011/ s3/mybucket/ -https://play.minio.io:9000/mybucket/myobject.txt: 14 B / 14 B ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ 100.00 % 41 B/s 0 -Waiting move operations to complete -``` - -*Example: Move a text file to an object storage and assign storage-class `REDUCED_REDUNDANCY` to the uploaded object.* - -```sh -mc mv --storage-class REDUCED_REDUNDANCY myobject.txt play/mybucket -myobject.txt: 14 B / 14 B ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ 100.00 % 41 B/s 0 -Waiting move operations to complete -``` - -*Example: Move a server-side encrypted file to an object storage.* - -```sh -mc mv --recursive --encrypt-key "s3/documents/=32byteslongsecretkeymustbegiven1 , myminio/documents/=32byteslongsecretkeymustbegiven2" s3/documents/myobject.txt myminio/documents/ -myobject.txt: 14 B / 14 B ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ 100.00 % 41 B/s 0 -Waiting move operations to complete -``` - -*Example: Perform key-rotation on a server-side encrypted object* - -```sh -mc mv --encrypt-key 'myminio1/mybucket=32byteslongsecretkeymustgenerate , myminio2/mybucket/=32byteslongsecretkeymustgenerat1' myminio1/mybucket/encryptedobject myminio2/mybucket/encryptedobject -encryptedobject: 14 B / 14 B ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ 100.00 % 41 B/s 0 -Waiting move operations to complete -``` -Notice that two different aliases myminio1 and myminio2 are used for the same endpoint to provide the old secretkey and the newly rotated key. 
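-
-One way to set up such a pair of aliases is sketched below. The endpoint URL and credentials are placeholders; both aliases use the same server credentials and differ only so that the old and the new encryption key can be mapped to them in `--encrypt-key`.
-
-```sh
-# Two aliases pointing at the same endpoint, used to supply old and new SSE-C keys
-mc alias set myminio1 https://minio.example.net YOUR-ACCESS-KEY YOUR-SECRET-KEY
-mc alias set myminio2 https://minio.example.net YOUR-ACCESS-KEY YOUR-SECRET-KEY
-```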
- -*Example: Move a javascript file to object storage and assign Cache-Control header to the uploaded object* - -```sh -mc mv --attr Cache-Control=no-cache myscript.js play/mybucket -myscript.js: 14 B / 14 B ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ 100.00 % 41 B/s 0 -Waiting move operations to complete -``` - -*Example: Move a text file to an object storage and preserve the filesyatem attributes.* - -```sh -mc mv -a myobject.txt play/mybucket -myobject.txt: 14 B / 14 B ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓ 100.00 % 41 B/s 0 -Waiting move operations to complete -``` - - -### Command `rm` -Use `rm` command to remove file or object - -```sh -USAGE: - mc rm [FLAGS] TARGET [TARGET ...] - -FLAGS: - --versions remove object(s) and all its versions - --rewind value roll back object(s) to current versions at specified time - --version-id value, --vid value delete a specific version of an object - --recursive, -r remove recursively - --force allow a recursive remove operation - --dangerous allow site-wide removal of objects - --incomplete, -I remove incomplete uploads - --dry-run perform a fake remove operation - --stdin read object names from STDIN - --older-than value remove objects older than value in duration string (e.g. 7d10h31s) - --newer-than value remove objects newer than value in duration string (e.g. 7d10h31s) - --bypass bypass governance - --encrypt-key value encrypt/decrypt objects (using server-side encryption with customer provided keys) - --help, -h show help - -ENVIRONMENT VARIABLES: - MC_ENCRYPT_KEY: list of comma delimited prefix=secret values -``` - -*Example: Remove a single object.* - -```sh -mc rm play/mybucket/myobject.txt -Removing `play/mybucket/myobject.txt`. -``` -*Example: Remove an encrypted object.* - -```sh -mc rm --encrypt-key "play/mybucket=32byteslongsecretkeymustbegiven1" play/mybucket/myobject.txt -Removing `play/mybucket/myobject.txt`. -``` - -*Example: Recursively remove a bucket's contents. Since this is a dangerous operation, you must explicitly pass `--force` option.* - -```sh -mc rm --recursive --force play/mybucket -Removing `play/mybucket/newfile.txt`. -Removing `play/mybucket/otherobject.txt`. -``` - -*Example: Remove all uploaded incomplete files for an object.* - -```sh -mc rm --incomplete play/mybucket/myobject.1gig -Removing `play/mybucket/myobject.1gig`. -``` -*Example: Remove object and output a message only if the object is created older than 1 day, 2 hours and 30 minutes. Otherwise, the command stays quiet and nothing is printed out.* - -```sh -mc rm -r --force --older-than 1d2h30m myminio/mybucket -Removing `myminio/mybucket/dayOld1.txt`. -Removing `myminio/mybucket/dayOld2.txt`. -Removing `myminio/mybucket/dayOld3.txt`. -``` - -*Example: Remove a particular version ID.* - -```sh -mc rm myminio/docs/money.xls --version-id "f20f3792-4bd4-4288-8d3c-b9d05b3b62f6" -Removing `myminio/docs/money.xls` (versionId=f20f3792-4bd4-4288-8d3c-b9d05b3b62f6). -``` - -*Example: Remove all object versions older than one year.* - -```sh -mc rm myminio/docs/ --recursive --versions --rewind 365d -Removing `myminio/docs/foo.xls` (versionId=4d184091-ca84-4730-8d73-9e51a1016dc2, modTime=2019-08-05 13:42:08 +0000 UTC). -Removing `myminio/docs/foo.xls` (versionId=9f716132-81ad-480b-a315-e44144b252a0, modTime=2019-08-05 13:41:59 +0000 UTC). -``` - - -### Command `share` -`share` command securely grants upload or download access to object storage. This access is only temporary and it is safe to share with remote users and applications. 
If you want to grant permanent access, you may look at `mc anonymous` command instead. - -Generated URL has access credentials encoded in it. Any attempt to tamper the URL will invalidate the access. To understand how this mechanism works, please follow [Pre-Signed URL](http://docs.aws.amazon.com/AmazonS3/latest/dev/ShareObjectPreSignedURL.html) technique. - -```sh -USAGE: - mc share [FLAGS] COMMAND - -FLAGS: - --help, -h show help - -COMMANDS: - download generate URLs for download access - upload generate ‘curl’ command to upload objects without requiring access/secret keys - list list previously shared objects and folders -``` - -### Sub-command `share download` - Share Download -`share download` command generates URLs to download objects without requiring access and secret keys. Expiry option sets the maximum validity period (no more than 7 days), beyond which the access is revoked automatically. - -``` -USAGE: - mc share download [FLAGS] TARGET [TARGET...] - -FLAGS: - --version-id value, --vid value share a particular object version - --recursive, -r share all objects recursively - --expire value, -E value set expiry in NN[h|m|s] (default: "168h") - --help, -h show help -``` - -*Example: Grant temporary access to an object with 4 hours expiry limit.* - -```sh -mc share download --expire 4h play/mybucket/myobject.txt -URL: https://play.min.io/mybucket/myobject.txt -Expire: 0 days 4 hours 0 minutes 0 seconds -Share: https://play.min.io/mybucket/myobject.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=Q3AM3UQ867SPQQA43P2F%2F20160408%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20160408T182008Z&X-Amz-Expires=14400&X-Amz-SignedHeaders=host&X-Amz-Signature=1527fc8f21a3a7e39ce3c456907a10b389125047adc552bcd86630b9d459b634 -``` - -*Example: Share a particular version of an object* -```sh -mc share download --version-id 3Jr2x6fqlBUsVzbvPihBO3HgNpgZgAnp play/mybucket/myobject.txt -URL: https://play.min.io/mybucket/myobject.txt -Expire: 7 days 0 hours 0 minutes 0 seconds -Share: https://play.min.io/mybucket/myobject.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=Q3AM3UQ867SPQQA43P2F%2F20160408%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20160408T182008Z&X-Amz-Expires=604800&versionId=3Jr2x6fqlBUsVzbvPihBO3HgNpgZgAnp&X-Amz-SignedHeaders=host&X-Amz-Signature=1527fc8f21a3a7e39ce3c456907a10b389125047adc552bcd86630b9d459b634 -``` - -#### Sub-command `share upload` - Share Upload -`share upload` command generates a ‘curl’ command to upload objects without requiring access/secret keys. Expiry option sets the maximum validity period (no more than 7 days), beyond which the access is revoked automatically. Content-type option restricts uploads to only certain type of files. - -```sh -USAGE: - mc share upload [FLAGS] TARGET [TARGET...] - -FLAGS: - --recursive, -r recursively upload any object matching the prefix - --expire value, -E value set expiry in NN[h|m|s] (default: "168h") - --content-type value, -T value specify a content-type to allow - --help, -h show help -``` - -*Example: Generate a `curl` command to enable upload access to `play/mybucket/myotherobject.txt`. 
User replaces `` with the actual filename to upload* - -```sh -mc share upload play/mybucket/myotherobject.txt -URL: https://play.min.io/mybucket/myotherobject.txt -Expire: 7 days 0 hours 0 minutes 0 seconds -Share: curl https://play.min.io/mybucket -F x-amz-date=20160408T182356Z -F x-amz-signature=de343934bd0ba38bda0903813b5738f23dde67b4065ea2ec2e4e52f6389e51e1 -F bucket=mybucket -F policy=eyJleHBpcmF0aW9uIjoiMjAxNi0wNC0xNVQxODoyMzo1NS4wMDdaIiwiY29uZGl0aW9ucyI6W1siZXEiLCIkYnVja2V0IiwibXlidWNrZXQiXSxbImVxIiwiJGtleSIsIm15b3RoZXJvYmplY3QudHh0Il0sWyJlcSIsIiR4LWFtei1kYXRlIiwiMjAxNjA0MDhUMTgyMzU2WiJdLFsiZXEiLCIkeC1hbXotYWxnb3JpdGhtIiwiQVdTNC1ITUFDLVNIQTI1NiJdLFsiZXEiLCIkeC1hbXotY3JlZGVudGlhbCIsIlEzQU0zVVE4NjdTUFFRQTQzUDJGLzIwMTYwNDA4L3VzLWVhc3QtMS9zMy9hd3M0X3JlcXVlc3QiXV19 -F x-amz-algorithm=AWS4-HMAC-SHA256 -F x-amz-credential=Q3AM3UQ867SPQQA43P2F/20160408/us-east-1/s3/aws4_request -F key=myotherobject.txt -F file=@ -``` - -#### Sub-command `share list` - Share List -`share list` command lists unexpired URLs that were previously shared - -```sh -USAGE: - mc share list COMMAND - -COMMAND: - upload: list previously shared access to uploads. - download: list previously shared access to downloads. -``` - - -### Command `mirror` -`mirror` command synchronizes data between filesystems and object storages, similarly to `rsync`. - -```sh -USAGE: - mc mirror [FLAGS] SOURCE TARGET - -FLAGS: - --overwrite overwrite object(s) on target if it differs from source - --dry-run perform a fake mirror operation - --watch, -w watch and synchronize changes - --remove remove extraneous object(s) on target - --region value specify region when creating new bucket(s) on target (default: "us-east-1") - --preserve, -a preserve file(s)/object(s) attributes and bucket(s) policy/locking configuration(s) on target bucket(s) - --md5 force all upload(s) to calculate md5sum checksum - --active-active enable active-active multi-site setup - --disable-multipart disable multipart upload feature - --exclude value exclude object(s) that match specified object name pattern - --exclude-bucket value exclude bucket(s) that match specified bucket name pattern - --exclude-storageclass value exclude object(s) that match the specified storage class - --older-than value filter object(s) older than value in duration string (e.g. 7d10h31s) - --newer-than value filter object(s) newer than value in duration string (e.g. 7d10h31s) - --storage-class value, --sc value specify storage class for new object(s) on target - --attr value add custom metadata for all objects - --monitoring-address value if specified, a new prometheus endpoint will be created to report mirroring activity. (eg: localhost:8081) - --retry if specified, will enable retrying on a per object basis if errors occur - --summary print a summary of the mirror session - --skip-errors skip any errors when mirroring - --encrypt-key value encrypt/decrypt objects (using server-side encryption with customer provided keys) [$MC_ENCRYPT_KEY] - --encrypt value encrypt/decrypt objects (using server-side encryption with server managed keys) [$MC_ENCRYPT] - --config-dir value, -C value path to configuration folder (default: "/root/.mc") [$MC_CONFIG_DIR] - --quiet, -q disable progress bar display [$MC_QUIET] - --no-color disable color theme [$MC_NO_COLOR] - --json enable JSON lines formatted output [$MC_JSON] - --debug enable debug output [$MC_DEBUG] - --insecure disable SSL certificate verification [$MC_INSECURE] - --limit-upload value limits uploads to a maximum rate in KiB/s, MiB/s, GiB/s. 
(default: unlimited) [$MC_LIMIT_UPLOAD] - --limit-download value limits downloads to a maximum rate in KiB/s, MiB/s, GiB/s. (default: unlimited) [$MC_LIMIT_DOWNLOAD] - --help, -h show help - -ENVIRONMENT VARIABLES: - MC_ENCRYPT: list of comma delimited prefixes - MC_ENCRYPT_KEY: list of comma delimited prefix=secret values -``` - -*Example: Mirror a local directory to 'mybucket' on https://play.min.io.* - -```sh -mc mirror localdir/ play/mybucket -localdir/b.txt: 40 B / 40 B ┃▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓┃ 100.00 % 73 B/s 0 -``` - -*Example: Continuously watch for changes on a local directory and mirror the changes to 'mybucket' on https://play.min.io.* - -```sh -mc mirror -w localdir play/mybucket -localdir/new.txt: 10 MB / 10 MB ┃▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓┃ 100.00 % 1 MB/s 15s -``` - - -### Command `find` -``find`` command finds files which match the given set of parameters. It only lists the contents which match the given set of criteria. - -```sh -USAGE: - mc find PATH [FLAGS] - -FLAGS: - --exec value spawn an external process for each matching object (see FORMAT) - --ignore value exclude objects matching the wildcard pattern - --name value find object names matching wildcard pattern - --newer value match all objects newer than value in duration string (e.g. 7d10h31s) - --older value match all objects older than value in duration string (e.g. 7d10h31s) - --path value match directory names matching wildcard pattern - --print value print in custom format to STDOUT (see FORMAT) - --regex value match directory and object name with PCRE regex pattern - --larger value match all objects larger than specified size in units (see UNITS) - --smaller value match all objects smaller than specified size in units (see UNITS) - --maxdepth value limit directory navigation to specified depth (default: 0) - --watch monitor a specified path for newly created object(s) - ... - ... - --help, -h show help -``` - -*Example: Find all jpeg images from s3 bucket and copy to MinIO "play/bucket" bucket continuously.* -```sh -mc find s3/bucket --name "*.jpg" --watch --exec "mc cp {} play/bucket" -``` - - -### Command `diff` -``diff`` command computes the differences between the two directories. It only lists the contents which are missing or which differ in size. - -It *DOES NOT* compare the contents, so it is possible that the objects which are of same name and of the same size, but have difference in contents are not detected. This way, it can perform high speed comparison on large volumes or between sites - -```sh -USAGE: - mc diff [FLAGS] FIRST SECOND - -FLAGS: - --config-folder value, -C value Path to configuration folder. (default: "/root/.mc") - --quiet, -q Disable progress bar display. - --no-color Disable color theme. - --json Enable JSON formatted output. - --debug Enable debug output. - --insecure Disable SSL certificate verification. - --help, -h Show help. - -LEGEND: - < - object is only in source. - > - object is only in destination. - ! - newer object is in source. -``` - -*Example: Compare a local directory and a remote object storage.* - -```sh - mc diff localdir play/mybucket -‘localdir/notes.txt’ and ‘https://play.min.io/mybucket/notes.txt’ - only in first. -``` - -### Option [--json] -JSON option enables parseable output in [JSON lines](http://jsonlines.org/) format. 
- -*Example: diff json output.* - -```sh -mc diff minio1/diffbucket minio2/diffbucket --json -{"status":"success","first":"","second":"http://127.0.0.1:9001/diffbucket/file1.png","diff":5} -{"status":"success","first":"http://127.0.0.1:9000/diffbucket/file2.png","second":"","diff":4} -{"status":"success","first":"http://127.0.0.1:9000/diffbucket/file3.png","second":"http://127.0.0.1:9001/diffbucket/file3.png","diff":2} -{"status":"success","first":"http://127.0.0.1:9000/diffbucket/file4.png","second":"http://127.0.0.1:9001/diffbucket/file4.png","diff":1} -``` - -#### Diff values in json output -| Constant | Value | Meaning | -| ---------- | ---------- | -------- | -| differInUnknown | 0 | Could not perform diff due to error | -| differInNone | 1 | Does not differ | -| differInSize | 2 | Differs in size | -| differInMetadata | 3 | Differs in metadata | -| differInType | 4 | Differs in type exfile/directory | -| differInFirst | 5 | Only in source (FIRST) | -| differInSecond | 6 | Only in target (SECOND) | -| differInAASourceMTime | 7 | Differs in active-active source modtime | - - -### Command `watch` -``watch`` provides a convenient way to watch on various types of event notifications on object -storage and filesystem. - -```sh -USAGE: - mc watch [FLAGS] PATH - -FLAGS: - --events value filter specific types of events, defaults to all events (default: "put,delete,get") - --prefix value filter events for a prefix - --suffix value filter events for a suffix - --recursive recursively watch for events - --help, -h show help -``` - -*Example: Watch for all events on object storage* - -```sh -mc watch play/testbucket -[2016-08-18T00:51:29.735Z] 2.7KiB ObjectCreated https://play.min.io/testbucket/CONTRIBUTING.md -[2016-08-18T00:51:29.780Z] 1009B ObjectCreated https://play.min.io/testbucket/MAINTAINERS.md -[2016-08-18T00:51:29.839Z] 6.9KiB ObjectCreated https://play.min.io/testbucket/README.md -``` - -*Example: Watch for all events on local directory* - -```sh -mc watch ~/Photos -[2016-08-17T17:54:19.565Z] 3.7MiB ObjectCreated /home/minio/Downloads/tmp/5467026530_a8611b53f9_o.jpg -[2016-08-17T17:54:19.565Z] 3.7MiB ObjectCreated /home/minio/Downloads/tmp/5467026530_a8611b53f9_o.jpg -... -[2016-08-17T17:54:19.565Z] 7.5MiB ObjectCreated /home/minio/Downloads/tmp/8771468997_89b762d104_o.jpg -``` - - -### Command `event` -``event`` provides a convenient way to manage various types of event notifications on a bucket. MinIO event notification can be configured to use AMQP, Redis, ElasticSearch, NATS and PostgreSQL services. MinIO configuration provides more details on how these services can be configured. - -```sh -USAGE: - mc event COMMAND [COMMAND FLAGS | -h] [ARGUMENTS...] - -COMMANDS: - add add a new bucket notification - remove remove a bucket notification. With '--force' can remove all bucket notifications - list list bucket notifications - -FLAGS: - --ignore-existing, -p ignore if event already exists - --help, -h show help -``` - -*Example: List all configured bucket notifications* - -```sh -mc event list play/andoria -MyTopic arn:minio:sns:us-east-1:1:TestTopic s3:ObjectCreated:*,s3:ObjectRemoved:* suffix:.jpg -``` - -*Example: Add a new 'sqs' notification resource only to notify on ObjectCreated event* - -```sh -mc event add play/andoria arn:minio:sqs:us-east-1:1:your-queue --event put -``` - -*Example: Add a new 'sqs' notification resource with filters* - -Add `prefix` and `suffix` filtering rules for `sqs` notification resource. 
-
-```sh
-mc event add play/andoria arn:minio:sqs:us-east-1:1:your-queue --prefix photos/ --suffix .jpg
-```
-
-*Example: Remove a 'sqs' notification resource*
-
-```sh
-mc event remove play/andoria arn:minio:sqs:us-east-1:1:your-queue
-```
-
-
-### Command `ilm`
-``ilm`` - A convenient way to manage bucket lifecycle configuration.
-
-```sh
-USAGE:
-  mc ilm COMMAND [COMMAND FLAGS | -h] [ARGUMENTS...]
-
-COMMANDS:
-  ls      list lifecycle configuration rules set on a bucket
-  add     add a lifecycle configuration rule to existing (if any) rule(s) on a bucket
-  rm      remove (if any) existing lifecycle configuration rule
-  edit    modify a lifecycle configuration rule with given id
-  export  export lifecycle configuration in JSON format
-  import  import lifecycle configuration in JSON format
-
-FLAGS:
-  --help, -h  show help
-
-```
-
-*Example: List the lifecycle management rules*
-
-```sh
-mc ilm ls myminio/testbucket
-     ID     | Prefix | Enabled | Expiry |  Date/Days   | Transition | Date/Days | Storage-Class | Tags
-----------|--------|---------|--------|--------------|------------|-----------|---------------|------
- Devices |  dev/  |    ✓    |   ✓    |  17 Sep 2020 |     ✗      |           |               |
-----------|--------|---------|--------|--------------|------------|-----------|---------------|------
-```
-
-For more details about the lifecycle configuration, refer to the official AWS S3 documentation [here](https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html).
-
-*Example: Edit the lifecycle management configuration rule given by ID "Documents" to set tags*
-```sh
-mc ilm edit --id "Documents" --tags "k1=v1&k2=v2" play/testbucket/dev
-Lifecycle configuration rule with ID `Documents` modified to play/testbucket/dev.
-```
-
-*Example: Remove the lifecycle management configuration rule given by ID "Documents"*
-```sh
-mc ilm rm --id "Documents" play/testbucket/dev
-Rule ID `Documents` from target play/testbucket/dev removed.
-```
-
-
-### Command `anonymous`
-`anonymous` command manages anonymous access policies for a bucket and its contents.
-
-```sh
-USAGE:
-  mc anonymous [FLAGS] set PERMISSION TARGET
-  mc anonymous [FLAGS] set-json FILE TARGET
-  mc anonymous [FLAGS] get TARGET
-  mc anonymous [FLAGS] get-json TARGET
-  mc anonymous [FLAGS] list TARGET
-
-PERMISSION:
-  Allowed policies are: [private, public, download, upload].
-
-FILE:
-  A valid S3 anonymous JSON filepath.
-
-FLAGS:
-  --help, -h  show help
-```
-
-*Example: Show current anonymous bucket policy*
-
-Show the current anonymous bucket policy for the ``mybucket/myphotos/2020/`` sub-directory.
-
-```sh
-mc anonymous get play/mybucket/myphotos/2020/
-Access permission for ‘play/mybucket/myphotos/2020/’ is ‘none’
-```
-
-*Example: Set anonymous bucket policy to download only*
-
-Set the anonymous bucket policy for the ``mybucket/myphotos/2020/`` sub-directory and its objects to ``download`` only. Now, objects under the sub-directory are publicly accessible, e.g. ``mybucket/myphotos/2020/yourobjectname`` is available at [https://play.min.io:9000/mybucket/myphotos/2020/yourobjectname](https://play.min.io:9000/mybucket/myphotos/2020/yourobjectname)
-
-```sh
-mc anonymous set download play/mybucket/myphotos/2020/
-Access permission for ‘play/mybucket/myphotos/2020/’ is set to 'download'
-```
-
-*Example: Set anonymous bucket policy from a JSON file*
-
-Configure the bucket policy for ``mybucket`` with a policy JSON file.
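-
-The file passed to ``set-json`` is a regular S3 bucket policy document. A minimal, illustrative sketch of such a file for download-only anonymous access to `mybucket` (the statement values here are placeholder assumptions, not a recommended policy):
-
-```sh
-# write an illustrative download-only policy to /tmp/policy.json (assumed values)
-cat > /tmp/policy.json <<'EOF'
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Principal": {"AWS": ["*"]},
-      "Action": ["s3:GetObject"],
-      "Resource": ["arn:aws:s3:::mybucket/*"]
-    }
-  ]
-}
-EOF
-```
-
-With a file like this in place, the ``set-json`` example below applies it to the bucket.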
- -```sh -mc anonymous set-json /tmp/policy.json play/mybucket -Access permission for `play/mybucket` is set from `/tmp/policy.json` -``` - -*Example : Set current anonymous bucket policy to private* - -Set anonymous bucket policy for *mybucket/myphotos/2020/* sub-directory to ``private``. This is equivalent to removing any bucket policies. - -```sh -mc anonymous set private play/mybucket/myphotos/2020/ -Access permission for ‘play/mybucket/myphotos/2020/’ is set to 'private' -``` - -*Example : List policies set to `mybucket`.* - -```sh -mc anonymous list s3/mybucket -``` - - -### Command `tag` -` tag` command provides a convenient way to set, remove, and list bucket/object tags. Tags are defined as key-value pairs. - -```sh -USAGE: - mc tag COMMAND [COMMAND FLAGS | -h] [ARGUMENTS...] - -COMMANDS: - list list tags of a bucket or an object - remove remove tags assigned to a bucket or an object - set set tags for a bucket or an object - -FLAGS: - --help, -h show help - --json enable JSON formatted output - --debug enable debug output -``` - -*Example : List tags assigned to an object* - -List tags for `testobject` in `testbucket` in alias `s3` -```sh -mc tag list s3/testbucket/testobject -Name : testobject -editable : only-by-owner-and-authenticated -confidentiality : open-to-authenticated-only -``` - -*Example : Set tags for an object* - -Set tags for `testobject` in `testbucket` in alias `s3` -```sh -mc tag set s3/testbucket/testobject "key1=value1&key2=value2&key3=value3" -Tags set for s3/testbucket/testobject. -``` - -*Example : Remove tags assigned to an object* - -Remove tags assigned to `testobject` in `testbucket` in alias `s3` -```sh -mc tag remove s3/testbucket/testobject -Tags removed for s3/testbucket/testobject. -``` - -*Example: Assign tags to a object versions older than one week* -```sh -mc tag set --versions --rewind 7d play/testbucket/testobject "status=old" -``` - - -### Command `admin` -Please visit [here](https://min.io/docs/minio/linux/reference/minio-mc-admin.html?ref=gh) for a more comprehensive admin guide. - - -### Command `alias` -`alias` command provides a convenient way to manage aliases entries in your config file `~/.mc/config.json`. It is also OK to edit the config file manually using a text editor. - -```sh -USAGE: - mc alias COMMAND [COMMAND FLAGS | -h] [ARGUMENTS...] - -COMMANDS: - set, s add a new alias to configuration file - remove, rm remove an alias from configuration file - list, ls lists aliases in configuration file - -FLAGS: - --help, -h show help -``` - -*Example: Manage Config File* - -Add MinIO server access and secret keys to config file alias entry. Note that, the history feature of your shell may record these keys and pose a security risk. On `bash` shell, use `set -o` and `set +o` to disable and enable history feature momentarily. - -```sh -set +o history -mc alias set myminio http://localhost:9000 OMQAGGOL63D7UNVQFY8X GcY5RHNmnEWvD/1QxD3spEIGj+Vt9L7eHaAaBTkJ -set -o history -``` - -Remove the alias from the config file. - -```sh -mc alias remove myminio -``` - -List all configured aliases - -```sh -mc alias list -``` - - -### Command `update` -Check for new software updates from [https://dl.min.io](https://dl.min.io). Experimental flag checks for unstable experimental releases primarily meant for testing purposes. 
- -```sh -USAGE: - mc update [FLAGS] - -FLAGS: - --quiet, -q suppress chatty console output - --json enable JSON formatted output - --help, -h show help -``` - -*Example: Check for an update.* - -```sh -mc update -You are already running the most recent version of ‘mc’. -``` - - -### Command `stat` -`stat` command displays information on objects (with optional prefix) contained in the specified bucket on an object storage. On a filesystem, it behaves like `stat` command. - -```sh -USAGE: - mc stat [FLAGS] TARGET - -FLAGS: - --rewind value stat on older version(s) - --versions stat all versions - --version-id value, --vid value stat a specific object version - --recursive, -r stat all objects recursively - --encrypt-key value encrypt/decrypt objects (using server-side encryption with customer provided keys) - --help, -h show help - -ENVIRONMENT VARIABLES: - MC_ENCRYPT_KEY: list of comma delimited prefix=secret values -``` - -*Example: Display information on a bucket named "mybucket" on https://play.min.io.* - - -```sh -mc stat play/mybucket -Name : mybucket/ -Date : 1969-12-31 16:00:00 PST -Size : 0 B -Type : folder -Metadata : - Encryption: Not Set - Versioning: Enabled - LockConfiguration: - RetentionMode: COMPLIANCE - Retention Until Date: 1DAYS - Notification: Unset - Replication: Enabled - Policy: none - Location: us-east-1 - Tagging: key1:value1, key2:value2 - ILM: Not Set -``` - -*Example: Display information on an encrypted object "myobject" in "mybucket" on https://play.min.io.* - - -```sh -mc stat play/mybucket/myobject --encrypt-key "play/mybucket=32byteslongsecretkeymustbegiven1" -Name : myobject -Date : 2018-03-02 11:47:13 PST -Size : 132B -ETag : d03ba22cd78282b7aef705bf31b8cded -Type : file -Metadata : - Content-Type : application/octet-stream - X-Amz-Server-Side-Encryption-Customer-Key-Md5 : 4xSRdYsabg+s2nlsHKhgnw== - X-Amz-Server-Side-Encryption-Customer-Algorithm: AES256 -``` - -*Example: Display information on objects contained in the bucket named "mybucket" on https://play.min.io.* - -```sh -mc stat -r play/mybucket -Name : mybucket/META/textfile -Date : 2018-02-06 18:17:38 PST -Size : 1024B -ETag : d41d8cd98f00b204e9800998ecf8427e -Type : file -Metadata : - Content-Type: application/octet-stream - -Name : mybucket/emptyfile -Date : 2018-02-06 18:16:14 PST -Size : 100B -ETag : d41d8cd98f00b204e9800998ecf8427e -Type : file -Metadata : - Content-Type: application/octet-stream -``` - -*Example: Stat a specific object version* -```sh -mc stat --version-id "CL3sWgdSN2pNntSf6UnZAuh2kcu8E8si" s3/personal-docs/2018-account_report.docx -Name : s3/personal-docs/2018-account_report.docx -Date : 2018-02-06 18:16:14 PST -Size : 100B -ETag : d41d8cd98f00b204e9800998ecf8427e -Type : file -Metadata : - Content-Type: application/vnd.openxmlformats-officedocument.wordprocessingml.document -``` - - - -### Command `version` -`version` manage bucket versioning - -```sh -NAME: - mc version - manage bucket versioning - -USAGE: - mc version TARGET [enable | suspend | info] - -COMMANDS: - enable Enable bucket versioning - suspend Suspend bucket versioning - info Show bucket versioning status -``` - -*Example: Enable versioning on bucket `mybucket`* - -```sh -mc version enable myminio/mybucket -myminio/mybucket versioning is enabled -``` - -*Example: Display the version configuration for bucket `mybucket`* - -```sh -mc version info myminio/mybucket -myminio/mybucket versioning status is enabled - -``` -*Example: Suspend versioning for bucket `mybucket`* -```sh -mc version suspend myminio/mybucket 
-myminio/mybucket versioning is suspended -``` - - -### Command `undo` -`undo` reverts latest PUT/DELETE operations - -```sh -NAME: - mc undo - undo PUT/DELETE operations - -USAGE: - mc undo [FLAGS] SOURCE - -FLAGS: - --recursive, -r undo last S3 put/delete operations - --force force recursive operation - --last value undo N last changes (default: 1) - --dry-run fake an undo operation - --help, -h show help -``` - -*Example: Undo the last 3 uploads and/or removals of a particular object* - -```sh -mc undo s3/backups/file.zip --last 3 -✓ Last delete of `CREDITS` is reverted. -✓ Last upload of `CREDITS` (vid=mj2juHIoyvU94s8kIim5H.Z9L0QO50wO) is reverted. -✓ Last upload of `CREDITS` (vid=przFKd1iWC7ts_8FNoIvLae8NH_BAi_X) is reverted. -``` - - -### Command `encrypt` -`encrypt` manages bucket encryption config - -```sh -NAME: - mc encrypt - manage bucket encryption config - -USAGE: - mc encrypt COMMAND [COMMAND FLAGS | -h] [ARGUMENTS...] - -COMMANDS: - set Set encryption config - clear Clear encryption config - info Show bucket encryption status - -FLAGS: - --help, -h show help -``` - -*Example: Display bucket encryption status for bucket `mybucket`* - -```sh -mc encrypt info myminio/mybucket -Algorithm: AES256 -``` - -*Example: Set SSE-S3 auto encryption for bucket `mybucket` on alias `myminio`* - -```sh -mc encrypt set sse-s3 myminio/mybucket -Auto encryption has been set successfully for myminio/source -``` - -*Example: Set SSE-KMS auto encryption for bucket `mybucket` on alias `myminio` with KMS Key Id "arn:aws:kms:us-east-1:xxx:key/xxx"* - -```sh -mc encrypt set sse-kms "arn:aws:kms:us-east-1:xxx:key/xxx" myminio/mybucket -Auto encryption has been set successfully for myminio/source -``` - -*Example: Clear auto encryption config for bucket `mybucket` on alias `myminio`* - -```sh -mc encrypt clear myminio/mybucket -Auto encryption configuration has been cleared successfully. -``` - - -### Command `replicate` -`replicate` manages bucket server side replication - -```sh -NAME: - mc replicate - manage bucket server side replication - -USAGE: - mc replicate COMMAND [COMMAND FLAGS | -h] [ARGUMENTS...] - -COMMANDS: - add add a server side replication configuration rule - update modify an existing server side replication configuration rule - ls list server side replication configuration rules - status show server side replication status - resync re-replicate all previously replicated objects - export export server side replication configuration - import import server side replication configuration in JSON format - rm remove a server side replication configuration rule - diff show unreplicated object versions - -FLAGS: - --help, -h show help -``` - -*Example: Add replication configuration rule on `mybucket` on alias `myminio`. Enable delete marker replication and replication of versioned deletes for the configuration* - -```sh -mc replicate add myminio/mybucket/prefix --tags "key1=value1&key2=value2" --storage-class "STANDARD" --remote-bucket "http://minio3:minio123@localhost:9006/bucket" --priority 1 -Replication configuration rule applied to myminio/mybucket/prefix. -``` - -*Example: Disable replication configuration rule with rule Id "bsibgh8t874dnjst8hkg" on bucket "mybucket" with prefix "prefix" for alias `myminio`* - -```sh -mc replicate update myminio/mybucket/prefix --id "bsibgh8t874dnjst8hkg" --state disable -Replication configuration rule with ID `bsibgh8t874dnjst8hkg` applied to myminio/mybucket/prefix. 
-```
-
-*Example: Change priority of rule with rule ID "bsibgh8t874dnjst8hkg" on bucket "mybucket" for alias `myminio`.*
-
-```sh
-mc replicate update myminio/mybucket/prefix --id "bsibgh8t874dnjst8hkg" --priority 3
-Replication configuration rule with ID `bsibgh8t874dnjst8hkg` applied to myminio/mybucket/prefix.
-```
-
-*Example: Clear tags on rule ID "bsibgh8t874dnjst8hkg" for target myminio/bucket which has a replication configuration rule with prefix "prefix"*
-
-```sh
-mc replicate update myminio/mybucket/prefix --id "bsibgh8t874dnjst8hkg" --tags ""
-Replication configuration rule with ID `bsibgh8t874dnjst8hkg` applied to myminio/mybucket/prefix successfully.
-```
-
-*Example: Enable delete marker replication and versioned delete replication on rule ID "bsibgh8t874dnjst8hkg" for target myminio/bucket which has a replication configuration rule with prefix "prefix"*
-
-```sh
-mc replicate update myminio/mybucket/prefix --id "bsibgh8t874dnjst8hkg" --replicate "delete,delete-marker"
-Replication configuration rule with ID `bsibgh8t874dnjst8hkg` applied to myminio/mybucket/prefix successfully.
-```
-*Example: Disable delete marker and versioned delete replication on rule ID "bsibgh8t874dnjst8hkg" for target myminio/bucket which has a replication configuration rule with prefix "prefix"*
-
-```sh
-mc replicate update myminio/mybucket/prefix --id "bsibgh8t874dnjst8hkg" --replicate ""
-Replication configuration rule with ID `bsibgh8t874dnjst8hkg` applied to myminio/mybucket/prefix successfully.
-```
-
-*Example: Edit credentials for the remote target with replication rule ID "kxYD.491"*
-
-```sh
-mc replicate update myminio/mybucket --id "kxYD.491" --remote-bucket https://foobar:newpassword@minio.siteb.example.com/targetbucket
-```
-
-*Example: List replication configuration rules set on `mybucket` on alias `myminio`*
-
-```sh
-mc replicate ls myminio/mybucket
-```
-
-*Example: Clear replication configuration for bucket `mybucket` on alias `myminio`*
-
-```sh
-mc replicate rm --all --force myminio/mybucket
-Replication configuration has been removed successfully for myminio/mybucket
-```
-
-*Example: Remove replication configuration rule with id `bsibgh8t874dnjst8hkg` for bucket `mybucket` on alias `myminio`*
-
-```sh
-mc replicate rm --id "bsibgh8t874dnjst8hkg" myminio/mybucket/prefix
-Replication configuration rule with id "bsibgh8t874dnjst8hkg" has been removed successfully for myminio/mybucket
-```
-
-*Example: Import replication configuration for bucket `mybucket` on alias `myminio` from `/data/replicate/config`*
-
-```sh
-mc replicate import myminio/mybucket < /data/replicate/config
-Replication configuration successfully set on `myminio/mybucket`.
-``` - -*Example: Export replication configuration for bucket `mybucket` on alias `myminio` to `/data/replicate/config`* - -```sh -mc replicate export myminio/mybucket > /data/replicate/config -``` -*Example: Show replication status of `mybucket` on alias `myminio`* - -```sh -mc replicate status myminio/mybucket -``` - -*Example: Resync replication of previously replicated objects from `mybucket` on alias `myminio` to remote target "arn:minio:replication::xxx:mybucket".* - -```sh -mc replicate resync start myminio/mybucket --remote-bucket "arn:minio:replication::xxx:mybucket" -``` - -*Example: Show status of replication resync of target "arn:minio:replication::xxx:mybucket" for `mybucket` on alias `myminio`.* - -```sh -mc replicate resync status myminio/mybucket --remote-bucket "arn:minio:replication::xxx:mybucket" -``` - - - -### Command `support` - support related commands - -```sh -NAME: - mc support register register with MinIO subscription network - mc support callhome configure callhome settings - mc support diag, diagnostics upload health data for diagnostics - mc support perf analyze object, network and drive performance - mc support inspect upload raw object contents for analysis - mc support profile generate profile data for debugging - mc support logs configure/display MinIO console logs - mc support top provide top like statistics for MinIO - -``` - -Register MinIO cluster at alias 'play' on SUBNET, using the name "play-cluster". -```sh -mc support register play --name play-cluster -``` - -Enable logs callhome for cluster with alias 'play'. -```sh -mc support callhome set play logs=on -``` - -Download 'xl.meta' for a specific object from all the drives in a zip file. -```sh -mc support inspect myminio/bucket/test*/xl.meta -``` - -Run object speed measurement with autotuning the concurrency to obtain maximum throughput and IOPs. -```sh -mc support perf object myminio/ -``` - -Upload MinIO diagnostics report for 'play' (https://play.min.io by default) to SUBNET -```sh -mc support diag play -``` - -Get CPU profiling for 2 minutes -```sh -mc support profile --type cpu --duration 120 myminio/ -``` - -Print last 5 application error logs entries for node 'node1' on MinIO server with alias 'myminio' -```sh -mc support logs show --last 5 --type application myminio node1 -``` - -Enable logs for cluster with alias 'play' -```sh -mc support logs enable play -``` - -Get a list of the 10 oldest locks on a distributed MinIO cluster, where 'myminio' is the MinIO cluster alias.* -```sh -mc admin top locks myminio -``` - -Display current in-progress all 's3.PutObject' API calls. 
-```sh
-mc support top api --name s3.PutObject myminio/
-```
-
-
-
-### Command `ping`
-`ping` command performs a liveness check.
-
-```sh
-USAGE:
-  mc ping [FLAGS] TARGET
-
-FLAGS:
-  --count value, -c value        perform liveness check for count number of times (default: 0)
-  --error-count value, -e value  exit after N consecutive ping errors
-  --interval value, -i value     wait interval between each request in seconds (default: 1)
-  --distributed, -a              ping all the servers in the cluster, use it when you have direct access to nodes/pods
-  --help, -h                     show help
-
-
-```
-
-*Example: Perform liveness check on https://play.min.io.*
-
-
-```sh
-mc ping play
-1: https://play.min.io:   min=919.538ms   max=919.538ms   average=919.538ms   errors=0   roundtrip=919.538ms
-2: https://play.min.io:   min=278.356ms   max=919.538ms   average=598.947ms   errors=0   roundtrip=278.356ms
-3: https://play.min.io:   min=278.356ms   max=919.538ms   average=504.759ms   errors=0   roundtrip=316.384ms
-```
-
-
-
-### Command `quota` - Manage bucket quota
-`quota` command sets or gets bucket quota on a MinIO server.
-
-```sh
-NAME:
-  mc quota - manage bucket quota
-
-USAGE:
-  set    set bucket quota
-  info   show bucket quota
-  clear  clear bucket quota
-
-QUOTA
-  quota accepts human-readable case-insensitive number
-  suffixes such as "k", "m", "g" and "t" referring to the metric units KB,
-  MB, GB and TB respectively. Adding an "i" to these prefixes, uses the IEC
-  units, so that "gi" refers to "gibibyte" or "GiB". A "b" at the end is
-  also accepted. Without suffixes the unit is bytes.
-
-```
-*Example: Show bucket quota on bucket 'mybucket' on MinIO.*
-
-```sh
-mc quota info myminio/mybucket
-```
-
-*Example: Set a hard bucket quota of 64MB for bucket 'mybucket' on MinIO.*
-
-```sh
-mc quota set myminio/mybucket --size 64MB
-```
-
-*Example: Reset bucket quota configured for bucket 'mybucket' on MinIO.*
-
-```sh
-mc quota clear myminio/mybucket
-```
diff --git a/docs/minio-client-configuration-files.md b/docs/minio-client-configuration-files.md
deleted file mode 100644
index 695f89a98c..0000000000
--- a/docs/minio-client-configuration-files.md
+++ /dev/null
@@ -1,92 +0,0 @@
-# MinIO Client Configuration Files Guide [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-
-In this document we will walk you through the configuration files of MinIO Client.
-
-## MinIO Client configuration directory
-MinIO Client configuration is stored in the ``.mc`` directory. It is a hidden directory which resides in the user's home directory.
-
-**This is how the structure of the directory looks:**
-
-```
-tree ~/.mc
-/home/supernova/.mc
-├── config.json
-├── session
-└── share
-2 directories, 5 files
-```
-### Files and directories
-
-#### ``session`` directory
-The ``session`` directory keeps metadata for all incomplete upload or mirror operations. You can run ``mc session list`` to list them.
-
-#### ``config.json``
-``config.json`` is the configuration file for MinIO Client; it gets generated after you install and start MinIO Client. All the credentials and endpoint information added via ``mc alias`` are stored or modified here.
- -``` -cat config.json -{ - "version": "10", - "aliases": { - "XL": { - "url": "http://127.0.0.1:9000", - "accessKey": "YI7S1CKXB76RGOGT6R8W", - "secretKey": "FJ9PWUVNXGPfiI72WMRFepN3LsFgW3MjsxSALroV", - "api": "S3v4", - "path": "auto" - }, - "fs": { - "url": "http://127.0.0.1:9000", - "accessKey": "YI7S1CKXB76RGOGT6R8W", - "secretKey": "FJ9PWUVNXGPfiI72WMRFepN3LsFgW3MjsxSALroV", - "api": "S3v4", - "path": "auto" - }, - "gcs": { - "url": "https://storage.googleapis.com", - "accessKey": "YOUR-ACCESS-KEY-HERE", - "secretKey": "YOUR-SECRET-KEY-HERE", - "api": "S3v2", - "path": "auto" - }, - "play": { - "url": "https://play.min.io", - "accessKey": "Q3AM3UQ867SPQQA43P2F", - "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", - "api": "S3v4", - "path": "auto" - }, - "s3": { - "url": "https://s3.amazonaws.com", - "accessKey": "YOUR-ACCESS-KEY-HERE", - "secretKey": "YOUR-SECRET-KEY-HERE", - "api": "S3v4", - "path": "auto" - }, - "ibm": { - "url": "https://s3.YOUR-REGION.cloud-object-storage.appdomain.cloud", - "accessKey": "YOUR-HMAC-ACCESS-KEY-ID", - "secretKey": "YOUR-HMAC-SECRET-ACCESS-KEY", - "api": "S3v4", - "path": "auto" - } - } -} -``` - -``version`` tells the version of the file. - -``aliases`` stores authentication credentials which will be used by MinIO Client. - -#### ``config.json.old`` -This file keeps previous config file version details. - -#### ``share`` directory -``share`` directory keeps metadata information of all upload and download URL for objects which is used by MinIO client ``mc share`` command. - -## Explore Further -* [MinIO Client Complete Guide](https://min.io/docs/minio/linux/reference/minio-mc.html?ref=gh) - - - - diff --git a/go.mod b/go.mod index ad5767c69e..cd2aea8c08 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/minio/mc -go 1.19 +go 1.21 require ( github.com/charmbracelet/bubbletea v0.25.0 @@ -10,23 +10,23 @@ require ( github.com/go-ole/go-ole v1.3.0 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 - github.com/google/uuid v1.5.0 + github.com/google/uuid v1.6.0 github.com/inconshreveable/mousetrap v1.1.0 - github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.17.4 + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.6 github.com/mattn/go-ieproxy v0.0.11 github.com/mattn/go-isatty v0.0.20 github.com/minio/cli v1.24.2 github.com/minio/colorjson v1.0.6 github.com/minio/filepath v1.0.0 github.com/minio/md5-simd v1.1.2 // indirect - github.com/minio/minio-go/v7 v7.0.67 + github.com/minio/minio-go/v7 v7.0.69 github.com/minio/selfupdate v0.6.0 github.com/minio/sha256-simd v1.0.1 github.com/mitchellh/go-homedir v1.1.0 github.com/pkg/xattr v0.4.9 github.com/posener/complete v1.2.3 - github.com/prometheus/client_golang v1.17.0 + github.com/prometheus/client_golang v1.19.0 github.com/prometheus/prom2json v1.3.3 // indirect github.com/rjeczalik/notify v0.9.3 github.com/rs/xid v1.5.0 @@ -45,7 +45,7 @@ require ( github.com/golang-jwt/jwt/v4 v4.5.0 github.com/juju/ratelimit v1.0.2 github.com/minio/madmin-go/v3 v3.0.50 - github.com/minio/pkg/v2 v2.0.7 + github.com/minio/pkg/v2 v2.0.16 github.com/muesli/reflow v0.3.0 github.com/olekukonko/tablewriter v0.0.5 github.com/vbauerster/mpb/v8 v8.7.1 @@ -58,14 +58,14 @@ require ( github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/kr/pretty v0.3.1 // indirect - 
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/lestrrat-go/jwx v1.2.29 // indirect github.com/minio/mux v1.9.0 // indirect - github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect github.com/safchain/ethtool v0.3.0 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect ) require ( @@ -74,23 +74,22 @@ require ( github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/fatih/structs v1.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/jedib0t/go-pretty/v6 v6.4.9 - github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/klauspost/cpuid/v2 v2.2.7 // indirect github.com/kr/text v0.2.0 // indirect github.com/lestrrat-go/backoff/v2 v2.0.8 // indirect github.com/lestrrat-go/blackmagic v1.0.2 // indirect github.com/lestrrat-go/httpcc v1.0.1 // indirect github.com/lestrrat-go/iter v1.0.2 // indirect - github.com/lestrrat-go/jwx v1.2.29 // indirect github.com/lestrrat-go/option v1.0.1 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect - github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-localereader v0.0.1 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect @@ -102,27 +101,25 @@ require ( github.com/muesli/termenv v0.15.2 github.com/philhofer/fwd v1.1.2 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect - github.com/prometheus/common v0.45.0 // indirect - github.com/prometheus/procfs v0.12.0 - github.com/rivo/uniseg v0.4.4 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/prometheus/common v0.52.2 // indirect + github.com/prometheus/procfs v0.13.0 + github.com/rivo/uniseg v0.4.7 // indirect github.com/secure-io/sio-go v0.3.1 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect github.com/tinylib/msgp v1.1.9 // indirect github.com/tklauser/go-sysconf v0.3.13 // indirect github.com/tklauser/numcpus v0.7.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect - go.etcd.io/etcd/api/v3 v3.5.11 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.11 // indirect - go.etcd.io/etcd/client/v3 v3.5.11 // indirect + go.etcd.io/etcd/api/v3 v3.5.13 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.13 // indirect + go.etcd.io/etcd/client/v3 v3.5.13 // indirect go.uber.org/multierr v1.11.0 // indirect - 
go.uber.org/zap v1.26.0 // indirect - golang.org/x/sync v0.5.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/sync v0.6.0 // indirect golang.org/x/sys v0.19.0 - google.golang.org/genproto v0.0.0-20231212172506-995d672761c0 // indirect - google.golang.org/grpc v1.60.1 // indirect + google.golang.org/grpc v1.63.0 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect ) diff --git a/go.sum b/go.sum index 0bcf3c0435..00d132014a 100644 --- a/go.sum +++ b/go.sum @@ -31,8 +31,9 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= @@ -51,10 +52,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= @@ -62,8 +61,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -80,11 +79,11 @@ github.com/juju/ratelimit v1.0.2 h1:sRxmtRiajbvrcLQT7S+JbqU0ntsb9W2yhSdNN8tWfaI= github.com/juju/ratelimit v1.0.2/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= +github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= -github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= +github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -108,8 +107,8 @@ github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmt github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed h1:036IscGBfJsFIgJQzlui7nK1Ncm0tp2ktmPj8xO4N/0= -github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= @@ -130,8 +129,6 @@ github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZ github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/minio/cli v1.24.2 h1:J+fCUh9mhPLjN3Lj/YhklXvxj8mnyE/D6FpFduXJ2jg= github.com/minio/cli v1.24.2/go.mod h1:bYxnK0uS629N3Bq+AOZZ+6lwF77Sodk4+UL9vNuXhOY= github.com/minio/colorjson 
v1.0.6 h1:m7TUvpvt0u7FBmVIEQNIa0T4NBQlxrcMBp4wJKsg2Ik= @@ -142,12 +139,12 @@ github.com/minio/madmin-go/v3 v3.0.50 h1:+RQMetVFvPQmAOEDN/xmLhwk9+xOzu3rqwnlZEs github.com/minio/madmin-go/v3 v3.0.50/go.mod h1:ZDF7kf5fhmxLhbGTqyq5efs4ao0v4eWf7nOuef/ljJs= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.67 h1:BeBvZWAS+kRJm1vGTMJYVjKUNoo0FoEt/wUWdUtfmh8= -github.com/minio/minio-go/v7 v7.0.67/go.mod h1:+UXocnUeZ3wHvVh5s95gcrA4YjMIbccT6ubB+1m054A= +github.com/minio/minio-go/v7 v7.0.69 h1:l8AnsQFyY1xiwa/DaQskY4NXSLA2yrGsW5iD9nRPVS0= +github.com/minio/minio-go/v7 v7.0.69/go.mod h1:XAvOPJQ5Xlzk5o3o/ArO2NMbhSGkimC+bpW/ngRKDmQ= github.com/minio/mux v1.9.0 h1:dWafQFyEfGhJvK6AwLOt83bIG5bxKxKJnKMCi0XAaoA= github.com/minio/mux v1.9.0/go.mod h1:1pAare17ZRL5GpmNL+9YmqHoWnLmMZF9C/ioUCfy0BQ= -github.com/minio/pkg/v2 v2.0.7 h1:vJZ+XUTDeUe/cHpPZSyG/+54252dg6RQKU5K1jXfy/A= -github.com/minio/pkg/v2 v2.0.7/go.mod h1:yayUTo82b0RK+e97hGb1naC787mOtUEyDs3SIcwSyHI= +github.com/minio/pkg/v2 v2.0.16 h1:qBw2D08JE7fu4UORIxx0O4L09NM0wtMrw9sJRU5R1u0= +github.com/minio/pkg/v2 v2.0.16/go.mod h1:V+OP/fKRD/qhJMQpdXXrCXcLYjGMpHKEE26zslthm5k= github.com/minio/selfupdate v0.6.0 h1:i76PgT0K5xO9+hjzKcacQtO7+MjJ4JKA8Ak8XQ9DDwU= github.com/minio/selfupdate v0.6.0/go.mod h1:bO02GTIPCMQFTEvE5h4DjYB58bCoZ35XLeBf0buTDdM= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= @@ -179,25 +176,26 @@ github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE= github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b h1:0LFwY6Q3gMACTjAbMZBjXAqTOzOwFaj2Ld6cjeQ7Rig= -github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= -github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod 
h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.52.2 h1:LW8Vk7BccEdONfrJBDffQGRtpSzi5CQaRZGtboOO2ck= +github.com/prometheus/common v0.52.2/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q= +github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o= +github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g= github.com/prometheus/prom2json v1.3.3 h1:IYfSMiZ7sSOfliBoo89PcufjWO4eAR0gznGcETyaUgo= github.com/prometheus/prom2json v1.3.3/go.mod h1:Pv4yIPktEkK7btWsrUTWDDDrnpUrAELaOCj+oFwlgmc= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= -github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rjeczalik/notify v0.9.3 h1:6rJAzHTGKXGj76sbRgDiDcYj/HniypXmSJo1SWakZeY= github.com/rjeczalik/notify v0.9.3/go.mod h1:gF3zSOrafR9DQEWSE8TjfI9NkooDxbyT4UgRGKZA0lc= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= @@ -215,8 +213,6 @@ github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFt github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -224,7 +220,6 @@ github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.4/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= @@ -253,17 +248,18 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= 
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.etcd.io/etcd/api/v3 v3.5.11 h1:B54KwXbWDHyD3XYAwprxNzTe7vlhR69LuBgZnMVvS7E= -go.etcd.io/etcd/api/v3 v3.5.11/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4= -go.etcd.io/etcd/client/pkg/v3 v3.5.11 h1:bT2xVspdiCj2910T0V+/KHcVKjkUrCZVtk8J2JF2z1A= -go.etcd.io/etcd/client/pkg/v3 v3.5.11/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4= -go.etcd.io/etcd/client/v3 v3.5.11 h1:ajWtgoNSZJ1gmS8k+icvPtqsqEav+iUorF7b0qozgUU= -go.etcd.io/etcd/client/v3 v3.5.11/go.mod h1:a6xQUEqFJ8vztO1agJh/KQKOMfFI8og52ZconzcDJwE= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= +go.etcd.io/etcd/api/v3 v3.5.13 h1:8WXU2/NBge6AUF1K1gOexB6e07NgsN1hXK0rSTtgSp4= +go.etcd.io/etcd/api/v3 v3.5.13/go.mod h1:gBqlqkcMMZMVTMm4NDZloEVJzxQOQIls8splbqBDa0c= +go.etcd.io/etcd/client/pkg/v3 v3.5.13 h1:RVZSAnWWWiI5IrYAXjQorajncORbS0zI48LQlE2kQWg= +go.etcd.io/etcd/client/pkg/v3 v3.5.13/go.mod h1:XxHT4u1qU12E2+po+UVPrEeL94Um6zL58ppuJWXSAB8= +go.etcd.io/etcd/client/v3 v3.5.13 h1:o0fHTNJLeO0MyVbc7I3fsCf6nrOqn5d+diSarKnB2js= +go.etcd.io/etcd/client/v3 v3.5.13/go.mod h1:cqiAeY8b5DEEcpxvgWKsbLIWNM/8Wy2xJSDMtioMcoI= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -297,8 +293,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180926160741-c2ed4eda69e7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -314,7 +310,6 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -353,16 +348,12 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto v0.0.0-20231212172506-995d672761c0 h1:YJ5pD9rF8o9Qtta0Cmy9rdBwkSjrTCT6XTiUQVOtIos= -google.golang.org/genproto v0.0.0-20231212172506-995d672761c0/go.mod h1:l/k7rMz0vFTBPy+tFSGvXEd3z+BcoG1k7EHbqm+YBsY= -google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0 h1:s1w3X6gQxwrLEpxnLd/qXTVLgQE2yXwaOaoa6IlY/+o= -google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0/go.mod h1:CAny0tYF+0/9rmDB9fahA9YLzX3+AEVl1qXbv5hhj6c= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 h1:/jFB8jK5R3Sq3i/lmeZO0cATSzFfZaJq1J2Euan3XKU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0/go.mod h1:FUoWkonphQm3RhTS+kOEhF8h0iDpm4tdXolVCeZ9KKA= -google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= -google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda h1:b6F6WIV4xHHD0FA4oIyzU6mHWg2WI2X1RBehwa5QN38= +google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda/go.mod h1:AHcE/gZH76Bk/ROZhQphlRoWo5xKDEtz3eVEO1LfA8c= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= +google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=