diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index c395195..c99df49 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -38,7 +38,6 @@ jobs: os: - linux - darwin - - windows arch: - amd64 steps: @@ -55,4 +54,4 @@ jobs: GOOS: ${{ matrix.os }} GOARCH: ${{ matrix.arch }} - name: test - run: make test \ No newline at end of file + run: make test diff --git a/.gitignore b/.gitignore index 5e4d18b..01abf54 100644 --- a/.gitignore +++ b/.gitignore @@ -31,3 +31,10 @@ *.out *.app bin/* + +# IDE +.vscode + +# logs +*.log +*.out diff --git a/Makefile b/Makefile index 92b7100..85723a7 100644 --- a/Makefile +++ b/Makefile @@ -1,3 +1,4 @@ +.PHONY: build test fmt GO ?= go PKG := ./pkg/... @@ -14,9 +15,11 @@ LDFLAGS += -X $(REPO)/pkg/version.GitRef=$(GITREF) build: $(GO) build -ldflags '$(LDFLAGS)' -o $(BUILDTARGET) main.go + chmod +x $(BUILDTARGET) test: $(GO) test -v $(PKG) -short fmt: $(GO) mod tidy && find . -path vendor -prune -o -type f -iname '*.go' -exec go fmt {} \; + diff --git a/README.md b/README.md index 1aee41b..8c1ab75 100644 --- a/README.md +++ b/README.md @@ -1,14 +1,13 @@ # Overview -Backup and Restore (BR) is a CommandLine Interface Tool to back up data of graph spaces of [Nebula Graph](https://github.com/vesoft-inc/nebula-graph) and to restore data from the backup files. +Backup and Restore (BR) is a CommandLine Interface Tool to back up data of graph spaces of [Nebula](https://github.com/vesoft-inc/nebula) and to restore data from the backup files. # Features - Full backup or restore in one-click operation - Supported multiple backend types for storing the backup files: - Local Disk - - Hadoop HDFS - - Alibaba Cloud OSS - - Amazon S3 (_EXPERIMENTAL_) -- Supports backing up data of entire Nebula Graph cluster or specified spaces of it(_EXPERIMENTAL_) + - S3-Compatiable Storage(such as Alibaba Cloud OSS, Amazon S3, MinIO, Ceph RGW, and so on). 
+- Supports backing up data of entire Nebula Graph cluster or specified spaces of it(_EXPERIMENTAL_), but now it has some limitations: + - when restore use this, all other spaces will be erased! # Limitation - Incremental backup not supported for now @@ -18,11 +17,9 @@ Backup and Restore (BR) is a CommandLine Interface Tool to back up data of graph - For backup to local disk, backup files would be placed at each services(e.g. storage or meta)'s local path. A recommended practice is to mount a NFS Filesystem at that path so that one can restore the backup files to a difference host. For detail, please reference to the [Implementation](#Implementation) part. - Restoring a backup of specified spaces is only allowed to perform INPLACE, which means that if one backup a specified space from Cluster-A, this backup cannot be restored to another cluster(Let's say Cluster-B). Restoring an entire backup wouldn't have this limitation - Target cluster to restore must have the same topologies with the cluster where the backup comes from -- Hosts where BR CLI run and the hosts of target cluster(both storage and meta service) be authenticated with provided username in SSH Tunnel protocol # Prerequisites -- Hosts of cluster and host of CLI running at should be ssh authenticated with provided username. -- Hosts of cluster has installed cli tools of selected backend in $PATH: `hadoop` for HDFS, `ossutil` for Alibaba Cloud OSS, `aws` for amazon s3, etc. +- Nebula cluster to backup/restore should start the agent service in each host # Quick Start - Clone the tool repo: @@ -41,7 +38,7 @@ bin/br version ``` - Basically one can run with `--help` for each subcommand usage of BR. 
- - Backup a cluster: + - Full backup a cluster: ``` Usage: br backup full [flags] @@ -50,101 +47,125 @@ bin/br version -h, --help help for full Global Flags: - --concurrent int max concurrent(for aliyun OSS) (default 5) - --connection int max ssh connection (default 5) - --extra_args string backup storage utils(oss/hdfs/s3) args for backup - --log string log path (default "br.log") - --meta string meta server - --spaces stringArray (EXPERIMENTAL)space names. + --log string Specify br detail log path (default "br.log") + --meta string Specify meta server, any metad server will be ok + --spaces stringArray (EXPERIMENTAL)space names. By this option, user can specify which spaces to backup. Now this feature is still experimental. - - --storage string backup target url, format: ://. - : a string indicating which backend type. optional: local, hdfs. - now hdfs and local is supported, s3 and oss are still experimental. - example: - for local - "local:///the/local/path/to/backup" - for hdfs - "hdfs://example_host:example_port/examplepath" - for oss - "oss://example/url/to/the/backup" - (EXPERIMENTAL) for s3 - "s3://example/url/to/the/backup" - - --user string username to login into the hosts where meta/storage service located - --verbose show backup detailed informations + If not specified, will backup all spaces. + + --storage string backup target url, format: ://. + : a string indicating which backend type. optional: local, hdfs. + now hdfs and local is supported, s3 and oss are still experimental. 
+ example: + for local - "local:///the/local/path/to/backup" + for s3 - "s3://example/url/to/the/backup" + + --s3.access_key string S3 Option: set access key id + --s3.endpoint string S3 Option: set the S3 endpoint URL, please specify the http or https scheme explicitly + --s3.region string S3 Option: set region or location to upload or download backup + --s3.secret_key string S3 Option: set secret key for access id ``` - For example, the command below will conduct a full backup operation of entire cluster whose meta service's address is `0.0.0.0:1234`, with username `foo` to ssh-login hosts of cluster and upload the backup files to HDFS URL `hdfs://0.0.0.0:9000/example/backup/path`. + For example, the command below will conduct a full backup operation of entire cluster whose meta service's address is `127.0.0.1:9559`, upload the backup files to minio storage `s3://br-test/backup`. ``` - br backup full --meta "0.0.0.0:1234" --storage "hdfs://0.0.0.0:9000/example/backup/path" --user "foo" --verbose + /br backup full --meta "127.0.0.1:9559" --s3.endpoint "http://127.0.0.1:9000" --storage="s3://br-test/backup/" --s3.access_key=minioadmin --s3.secret_key=minioadmin ``` + Note: only when the storage uri is "s3://xxx", the s3 option is necessary. If the uri is "local://xxx", the s3 option is useless. + - Show information of existing backups: ``` Usage: - br show [flags] + br show [flags] Flags: - -h, --help help for show - --storage string storage path - - Global Flags: - --log string log path (default "br.log") + -h, --help help for show + --log string Specify br detail log path (default "br.log") + --s3.access_key string S3 Option: set access key id + --s3.endpoint string S3 Option: set the S3 endpoint URL, please specify the http or https scheme explicitly + --s3.region string S3 Option: set region or location to upload or download backup + --s3.secret_key string S3 Option: set secret key for access id + --storage string backup target url, format: ://. 
+ : a string indicating which backend type. optional: local, hdfs. + now hdfs and local is supported, s3 and oss are still experimental. + example: + for local - "local:///the/local/path/to/backup" + for s3 - "s3://example/url/to/the/backup" ``` For example, the command below will list the information of existing backups in HDFS URL `hdfs://0.0.0.0:9000/example/backup/path` ``` - br show --storage "hdfs://0.0.0.0:9000/example/backup/path" + br show --s3.endpoint "http://192.168.8.214:9000" --storage="s3://br-test/backup/" --s3.access_key=minioadmin --s3.secret_key=minioadmin ``` Output of `show` subcommand would be like below: ``` - +----------------------------+---------------------+------------------------------------+-------------+--------------+ - | NAME | CREATE TIME | SPACES | FULL BACKUP | SYSTEM SPACE | - +----------------------------+---------------------+------------------------------------+-------------+--------------+ - | BACKUP_2021_07_16_02_39_04 | 2021-07-16 10:39:05 | basketballplayer | true | true | - +----------------------------+---------------------+------------------------------------+-------------+--------------+ + +----------------------------+---------------------+--------+-------------+------------+ + | NAME | CREATE TIME | SPACES | FULL BACKUP | ALL SPACES | + +----------------------------+---------------------+--------+-------------+------------+ + | BACKUP_2021_12_11_14_40_12 | 2021-12-11 14:40:43 | nba | true | true | + | BACKUP_2021_12_13_14_18_52 | 2021-12-13 14:18:52 | nba | true | true | + | BACKUP_2021_12_13_15_06_27 | 2021-12-13 15:06:29 | nba | true | false | + | BACKUP_2021_12_21_12_01_59 | 2021-12-21 12:01:59 | nba | true | false | + +----------------------------+---------------------+--------+-------------+------------+ ``` - - Restore cluster from a specified backup: ``` Usage: - br restore [command] - - Available Commands: - full full restore Nebula Graph Database + br restore full [flags] Flags: - --concurrent int max 
concurrent(for aliyun OSS) (default 5) - --extra_args string storage utils(oss/hdfs/s3) args for restore - -h, --help help for restore - --meta string meta server - --name string backup name - --storage string storage path - --user string user for meta and storage + -h, --help help for full Global Flags: - --log string log path (default "br.log") - - Use "br restore [command] --help" for more information about a command. + --concurrency int Max concurrency for download data (default 5) + --log string Specify br detail log path (default "br.log") + --meta string Specify meta server, any metad server will be ok + --name string Specify backup name + + --storage string backup target url, format: ://. + : a string indicating which backend type. optional: local, hdfs. + now hdfs and local is supported, s3 and oss are still experimental. + example: + for local - "local:///the/local/path/to/backup" + for s3 - "s3://example/url/to/the/backup" + + --s3.access_key string S3 Option: set access key id + --s3.endpoint string S3 Option: set the S3 endpoint URL, please specify the http or https scheme explicitly + --s3.region string S3 Option: set region or location to upload or download backup + --s3.secret_key string S3 Option: set secret key for access id ``` - For example, the command below will conduct a restore operation, which restore to the cluster whose meta service address is `0.0.0.0:1234`, from local disk in path `/example/backup/path`. + For example, the command below will conduct a restore operation, which restore to the cluster whose meta service address is `127.0.0.1:9559`, from local disk in path `/home/nebula/backup/BACKUP_2021_12_08_18_38_08`. Note that by local disk backend, it will restore the backup files from the local path of the target cluster. If target cluster's host has changed, it may encounter an error because of missing files. A recommend practice is to mount a common NFS to prevent that. 
- ``` - br restore full --meta "0.0.0.0:1234" --storage "local:///example/backup/path" --name "BACKUP_2021_07_16_02_39_04" --user "foo" - ``` - - Clean up temporary files if any error occured during backup. - ``` - Usage: - br cleanup [flags] + ``` + br restore full --storage "local:///home/nebula/backup/" --meta "127.0.0.1:9559" --name BACKUP_2021_12_08_18_38_08 + ``` - Flags: - --backup_name string backup name - -h, --help help for cleanup - --meta strings meta server + - Clean up temporary files if any error occured during backup. It will clean the files in cluster and external storage. + ``` + Usage: + br cleanup [flags] - Global Flags: - --log string log path (default "br.log") + Flags: + -h, --help help for cleanup + --log string Specify br detail log path (default "br.log") + --meta string Specify meta server, any metad service will be ok + --name string Specify backup name + + --storage string backup target url, format: ://. + : a string indicating which backend type. optional: local, hdfs. + now hdfs and local is supported, s3 and oss are still experimental. + example: + for local - "local:///the/local/path/to/backup" + for s3 - "s3://example/url/to/the/backup + + --s3.access_key string S3 Option: set access key id + --s3.endpoint string S3 Option: set the S3 endpoint URL, please specify the http or https scheme explicitly + --s3.region string S3 Option: set region or location to upload or download backup + --s3.secret_key string S3 Option: set secret key for access id ``` # Implementation @@ -163,3 +184,5 @@ bin/br version - For restoring storage service's data, BR CLI would download the snapshots and restart storage service. + Note: BR CLI depend on agents in cluster hosts to upload/download the backup files between the external storage and the cluster machines. 
+ diff --git a/cmd/backup.go b/cmd/backup.go index f3cc98f..4831c36 100644 --- a/cmd/backup.go +++ b/cmd/backup.go @@ -1,6 +1,7 @@ package cmd import ( + "context" "fmt" "github.com/spf13/cobra" @@ -16,32 +17,9 @@ func NewBackupCmd() *cobra.Command { SilenceUsage: true, } + config.AddCommonFlags(backupCmd.PersistentFlags()) + config.AddBackupFlags(backupCmd.PersistentFlags()) backupCmd.AddCommand(newFullBackupCmd()) - backupCmd.PersistentFlags().StringVar(&backupConfig.Meta, "meta", "", "meta server") - backupCmd.PersistentFlags().StringArrayVar(&backupConfig.SpaceNames, "spaces", nil, - `(EXPERIMENTAL)space names. - By this option, user can specify which spaces to backup. Now this feature is still experimental. - `) - backupCmd.PersistentFlags().StringVar(&backupConfig.BackendUrl, "storage", "", - `backup target url, format: ://. - : a string indicating which backend type. optional: local, hdfs. - now hdfs and local is supported, s3 and oss are still experimental. - example: - for local - "local:///the/local/path/to/backup" - for hdfs - "hdfs://example_host:example_port/examplepath" - (EXPERIMENTAL) for oss - "oss://example/url/to/the/backup" - (EXPERIMENTAL) for s3 - "s3://example/url/to/the/backup" - `) - backupCmd.PersistentFlags().StringVar(&backupConfig.User, "user", "", "username to login into the hosts where meta/storage service located") - backupCmd.PersistentFlags().IntVar(&backupConfig.MaxSSHConnections, "connection", 5, "max ssh connection") - backupCmd.PersistentFlags().IntVar(&backupConfig.MaxConcurrent, "concurrent", 5, "max concurrent(for aliyun OSS)") - backupCmd.PersistentFlags().StringVar(&backupConfig.CommandArgs, "extra_args", "", "backup storage utils(oss/hdfs/s3) args for backup") - backupCmd.PersistentFlags().BoolVar(&backupConfig.Verbose, "verbose", false, "show backup detailed informations") - - backupCmd.MarkPersistentFlagRequired("meta") - backupCmd.MarkPersistentFlagRequired("storage") - backupCmd.MarkPersistentFlagRequired("user") - 
return backupCmd } @@ -49,39 +27,29 @@ func newFullBackupCmd() *cobra.Command { fullBackupCmd := &cobra.Command{ Use: "full", Short: "full backup Nebula Graph Database", - Args: func(cmd *cobra.Command, args []string) error { - - if backupConfig.MaxSSHConnections <= 0 { - backupConfig.MaxSSHConnections = 5 - } - - if backupConfig.MaxConcurrent <= 0 { - backupConfig.MaxConcurrent = 5 + RunE: func(cmd *cobra.Command, args []string) error { + err := log.SetLog(cmd.Flags()) + if err != nil { + return fmt.Errorf("init logger failed: %w", err) } - return nil - }, - RunE: func(cmd *cobra.Command, args []string) error { - logger, err := log.NewLogger(config.LogPath) + cfg := &config.BackupConfig{} + err = cfg.ParseFlags(cmd.Flags()) if err != nil { - return err + return fmt.Errorf("parse flags failed: %w", err) } - defer logger.Sync() // flushes buffer, if any - var b *backup.Backup - b, err = backup.NewBackupClient(backupConfig, logger.Logger) + + b, err := backup.NewBackup(context.TODO(), cfg) if err != nil { return err } fmt.Println("start to backup cluster...") - err = b.BackupCluster() + err = b.Backup() if err != nil { return err } fmt.Println("backup successed.") - if backupConfig.Verbose { - b.ShowSummaries() - } return nil }, } diff --git a/cmd/cleanup.go b/cmd/cleanup.go index b58fe41..e27f205 100644 --- a/cmd/cleanup.go +++ b/cmd/cleanup.go @@ -1,6 +1,9 @@ package cmd import ( + "context" + "fmt" + "github.com/spf13/cobra" "github.com/vesoft-inc/nebula-br/pkg/cleanup" "github.com/vesoft-inc/nebula-br/pkg/config" @@ -10,16 +13,26 @@ import ( func NewCleanupCmd() *cobra.Command { cleanupCmd := &cobra.Command{ Use: "cleanup", - Short: "[EXPERIMENTAL]Clean up temporary files in backup", + Short: "Cleanup backup files in external storage and nebula cluster", SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { - logger, _ := log.NewLogger(config.LogPath) + err := log.SetLog(cmd.Flags()) + if err != nil { + return fmt.Errorf("init logger failed: 
%w", err) + } - defer logger.Sync() // flushes buffer, if any - c := cleanup.NewCleanup(cleanupConfig, logger.Logger) + cfg := config.CleanupConfig{} + err = cfg.ParseFlags(cmd.Flags()) + if err != nil { + return fmt.Errorf("parse flags failed") + } - err := c.Run() + c, err := cleanup.NewCleanup(context.TODO(), cfg) + if err != nil { + return err + } + err = c.Clean() if err != nil { return err } @@ -28,10 +41,7 @@ func NewCleanupCmd() *cobra.Command { }, } - cleanupCmd.PersistentFlags().StringVar(&cleanupConfig.BackupName, "backup_name", "", "backup name") - cleanupCmd.MarkPersistentFlagRequired("backup_name") - cleanupCmd.PersistentFlags().StringSliceVar(&cleanupConfig.MetaServer, "meta", nil, "meta server") - cleanupCmd.MarkPersistentFlagRequired("meta") - + config.AddCommonFlags(cleanupCmd.PersistentFlags()) + config.AddCleanupFlags(cleanupCmd.PersistentFlags()) return cleanupCmd } diff --git a/cmd/cmd.go b/cmd/cmd.go deleted file mode 100644 index 0caef88..0000000 --- a/cmd/cmd.go +++ /dev/null @@ -1,10 +0,0 @@ -package cmd - -import "github.com/vesoft-inc/nebula-br/pkg/config" - -var ( - backupConfig config.BackupConfig - restoreConfig config.RestoreConfig - // for cleanup - cleanupConfig config.CleanupConfig -) diff --git a/cmd/restore.go b/cmd/restore.go index f78e5d9..04bbf9c 100644 --- a/cmd/restore.go +++ b/cmd/restore.go @@ -1,6 +1,7 @@ package cmd import ( + "context" "fmt" "github.com/spf13/cobra" @@ -9,26 +10,15 @@ import ( "github.com/vesoft-inc/nebula-br/pkg/restore" ) -func NewRestoreCMD() *cobra.Command { +func NewRestoreCmd() *cobra.Command { restoreCmd := &cobra.Command{ Use: "restore", Short: "restore Nebula Graph Database", SilenceUsage: true, } - + config.AddCommonFlags(restoreCmd.PersistentFlags()) + config.AddRestoreFlags(restoreCmd.PersistentFlags()) restoreCmd.AddCommand(newFullRestoreCmd()) - restoreCmd.PersistentFlags().StringVar(&restoreConfig.Meta, "meta", "", "meta server") - 
restoreCmd.PersistentFlags().StringVar(&restoreConfig.BackendUrl, "storage", "", "storage path") - restoreCmd.PersistentFlags().StringVar(&restoreConfig.User, "user", "", "user for meta and storage") - restoreCmd.PersistentFlags().StringVar(&restoreConfig.BackupName, "name", "", "backup name") - restoreCmd.PersistentFlags().IntVar(&restoreConfig.MaxConcurrent, "concurrent", 5, "max concurrent(for aliyun OSS)") - restoreCmd.PersistentFlags().StringVar(&restoreConfig.CommandArgs, "extra_args", "", "storage utils(oss/hdfs/s3) args for restore") - - restoreCmd.MarkPersistentFlagRequired("meta") - restoreCmd.MarkPersistentFlagRequired("storage") - restoreCmd.MarkPersistentFlagRequired("user") - restoreCmd.MarkPersistentFlagRequired("name") - return restoreCmd } @@ -36,31 +26,24 @@ func newFullRestoreCmd() *cobra.Command { fullRestoreCmd := &cobra.Command{ Use: "full", Short: "full restore Nebula Graph Database", - Args: func(cmd *cobra.Command, args []string) error { - - if restoreConfig.MaxConcurrent <= 0 { - restoreConfig.MaxConcurrent = 5 + RunE: func(cmd *cobra.Command, args []string) error { + err := log.SetLog(cmd.Flags()) + if err != nil { + return fmt.Errorf("init logger failed: %w", err) } - return nil - }, - - RunE: func(cmd *cobra.Command, args []string) error { - // nil mean backup all space - logger, err := log.NewLogger(config.LogPath) + cfg := &config.RestoreConfig{} + err = cfg.ParseFlags(cmd.Flags()) if err != nil { return err } - defer logger.Sync() // flushes buffer, if any - - var r *restore.Restore - r, err = restore.NewRestore(restoreConfig, logger.Logger) + r, err := restore.NewRestore(context.TODO(), cfg) if err != nil { return err } - err = r.RestoreCluster() + err = r.Restore() if err != nil { return err } diff --git a/cmd/show.go b/cmd/show.go index 4cdd1eb..87c9225 100644 --- a/cmd/show.go +++ b/cmd/show.go @@ -1,11 +1,13 @@ package cmd import ( + "context" + "fmt" + "github.com/spf13/cobra" "github.com/vesoft-inc/nebula-br/pkg/config" 
"github.com/vesoft-inc/nebula-br/pkg/log" "github.com/vesoft-inc/nebula-br/pkg/show" - "go.uber.org/zap" ) var backendUrl string @@ -16,30 +18,31 @@ func NewShowCmd() *cobra.Command { Short: "show backup info", SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { - // nil mean backup all space + err := log.SetLog(cmd.Flags()) + if err != nil { + return fmt.Errorf("init logger failed: %w", err) + } - logger, err := log.NewLogger(config.LogPath) + cfg := &config.ShowConfig{} + err = cfg.ParseFlags(cmd.Flags()) if err != nil { return err } - defer logger.Sync() // flushes buffer, if any - - s := show.NewShow(backendUrl, logger.Logger) + s, err := show.NewShow(context.TODO(), cfg) + if err != nil { + return err + } - err = s.ShowInfo() + err = s.Show() if err != nil { - logger.Error("show info failed", zap.Error(err)) return err } return nil }, } - - showCmd.PersistentFlags().StringVar(&backendUrl, "storage", "", "storage path") - - showCmd.MarkPersistentFlagRequired("storage") + config.AddCommonFlags(showCmd.PersistentFlags()) return showCmd } diff --git a/cmd/validate.go b/cmd/validate.go deleted file mode 100644 index f47fce9..0000000 --- a/cmd/validate.go +++ /dev/null @@ -1,49 +0,0 @@ -package cmd - -import ( - "fmt" - "path/filepath" - "strings" - - "github.com/vesoft-inc/nebula-br/pkg/remote" - "go.uber.org/zap" -) - -type addressValidateError struct { - Ip string -} - -type sshValidateError struct { - err error -} - -func (e sshValidateError) Error() string { - return e.err.Error() -} - -func (e addressValidateError) Error() string { - return fmt.Sprintf("The ip address(%s) must contain the port", e.Ip) -} - -func (e sshValidateError) UnWrap() error { - return e.err -} - -func checkSSH(addr string, user string, log *zap.Logger) error { - log.Info("checking ssh", zap.String("addr", addr)) - ipAddr := strings.Split(addr, ":") - if len(ipAddr) != 2 { - return &addressValidateError{addr} - } - client, err := remote.NewClient(ipAddr[0], user, 
log) - if err != nil { - log.Error("must enable SSH tunneling") - return &sshValidateError{err} - } - client.Close() - return nil -} - -func checkPathAbs(path string) bool { - return filepath.IsAbs(path) -} diff --git a/cmd/validate_test.go b/cmd/validate_test.go deleted file mode 100644 index aaa74e8..0000000 --- a/cmd/validate_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package cmd - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "go.uber.org/zap" -) - -func TestCheckSSH(t *testing.T) { - addrs := []string{"111"} - user := "testuser" - log, _ := zap.NewProduction() - err := checkSSH(addrs, user, log) - assert.Error(t, err) - var addrError addressValidateError - addrError.Ip = "111" - assert.Equal(t, err, &addrError) -} diff --git a/go.mod b/go.mod index 3e41781..08ae52d 100644 --- a/go.mod +++ b/go.mod @@ -1,17 +1,16 @@ module github.com/vesoft-inc/nebula-br -go 1.14 +go 1.16 require ( - github.com/facebook/fbthrift v0.31.1-0.20210223140454-614a73a42488 + github.com/facebook/fbthrift v0.31.1-0.20211129061412-801ed7f9f295 + github.com/google/uuid v1.3.0 github.com/olekukonko/tablewriter v0.0.5 - github.com/pkg/errors v0.9.1 // indirect - github.com/scylladb/go-set v1.0.2 + github.com/sirupsen/logrus v1.8.1 github.com/spf13/cobra v1.1.1 - github.com/stretchr/testify v1.6.1 - github.com/vesoft-inc/nebula-go/v2 v2.0.0-20210608032721-3e7bf449d35f - go.uber.org/zap v1.10.0 - golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 - golang.org/x/net v0.0.0-20201110031124-69a78807bb2b - golang.org/x/sync v0.0.0-20190423024810-112230192c58 + github.com/spf13/pflag v1.0.5 + github.com/stretchr/testify v1.7.0 + github.com/vesoft-inc/nebula-agent v0.0.0-20211230083817-a9effd897e2c + github.com/vesoft-inc/nebula-go/v2 v2.5.2-0.20211228055601-b5b11a36e453 + golang.org/x/sys v0.0.0-20211124211545-fe61309f8881 // indirect ) diff --git a/go.sum b/go.sum index 41b241a..8f8e489 100644 --- a/go.sum +++ b/go.sum @@ -16,15 +16,23 @@ github.com/BurntSushi/xgb 
v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aws/aws-sdk-go v1.42.22 h1:EwcM7/+Ytg6xK+jbeM2+f9OELHqPiEiEKetT/GgAr7I= +github.com/aws/aws-sdk-go v1.42.22/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod 
h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -36,11 +44,15 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/facebook/fbthrift v0.31.1-0.20210223140454-614a73a42488 h1:A4KCT0mvTBkvb93gGN+efLPkrgTqmqMeaLDG51KVhMM= -github.com/facebook/fbthrift v0.31.1-0.20210223140454-614a73a42488/go.mod h1:2tncLx5rmw69e5kMBv/yJneERbzrr1yr5fdlnTbu8lU= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/facebook/fbthrift v0.31.1-0.20211129061412-801ed7f9f295 h1:ZA+qQ3d2In0RNzVpk+D/nq1sjDSv+s1Wy2zrAPQAmsg= +github.com/facebook/fbthrift v0.31.1-0.20211129061412-801ed7f9f295/go.mod 
h1:2tncLx5rmw69e5kMBv/yJneERbzrr1yr5fdlnTbu8lU= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/set v0.2.1 h1:nn2CaJyknWE/6txyUDGwysr3G5QC6xWB/PtVjPBbeaA= -github.com/fatih/set v0.2.1/go.mod h1:+RKtMCH+favT2+3YecHGxcc0b4KyVWA1QWWJUs4E0CI= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -52,22 +64,41 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= 
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= 
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -75,6 +106,7 @@ github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -97,6 +129,10 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jmespath/go-jmespath v0.4.0 
h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= @@ -135,9 +171,7 @@ github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -146,20 +180,22 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/scylladb/go-set v1.0.2 h1:SkvlMCKhP0wyyct6j+0IHJkBkSZL+TDzZ4E7f7BCcRE= -github.com/scylladb/go-set v1.0.2/go.mod h1:DkpGd78rljTxKAnTDPFqXSGxvETQnJyuSOQwsHycqfs= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod 
h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= @@ -174,33 +210,31 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/vesoft-inc/nebula-go/v2 v2.0.0-20210608032721-3e7bf449d35f h1:AmjFqUC+cpM3Gc3wn/F/X+sSsmSFat0ec0qAnpQ8tQs= -github.com/vesoft-inc/nebula-go/v2 v2.0.0-20210608032721-3e7bf449d35f/go.mod h1:B7nR6+nOSo0umq/HkCmUfyRtYrJVOsNiPS9u4djDbSc= +github.com/vesoft-inc/nebula-agent v0.0.0-20211230083817-a9effd897e2c h1:KPiVKnrQiEJaKrtT5EewVZD9jwoQJ4eaQZR4eX2Q4sQ= +github.com/vesoft-inc/nebula-agent 
v0.0.0-20211230083817-a9effd897e2c/go.mod h1:79lL9wmxYYKgMHP+9Q+MLc3q+G9HFU/sCnBhy85G0B0= +github.com/vesoft-inc/nebula-go/v2 v2.5.2-0.20211228055601-b5b11a36e453 h1:1rwe3LQVuTRUJBf4Gonc47+T3dCD29EzkrRaTzkUNdw= +github.com/vesoft-inc/nebula-go/v2 v2.5.2-0.20211228055601-b5b11a36e453/go.mod h1:YRIuog6zyRKz0SagwwTcqHXCPjJ4GfQelIl+/FgSC+Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 
h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -233,18 +267,19 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d h1:20cMwl2fHAzkJMEA+8J4JgqBQcQGzbisXo31MIeenXI= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 
v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -258,14 +293,20 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881 h1:TyHqChC80pFkXWraUUf6RuB5IqFdQieMLwwCJokV2pc= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -278,6 +319,7 @@ golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools 
v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -286,6 +328,9 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -304,9 +349,32 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf 
v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= @@ -316,12 +384,17 @@ gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/main.go b/main.go index 22220cf..3c486c1 100644 --- a/main.go +++ b/main.go @@ -3,16 +3,13 @@ package main import ( "github.com/spf13/cobra" "github.com/vesoft-inc/nebula-br/cmd" - "github.com/vesoft-inc/nebula-br/pkg/config" ) func main() { - rootCmd := &cobra.Command{ Use: "br", - Short: "BR is a Nebula backup and restore tool", + Short: "Nebula br is a Nebula backup and restore tool", } - rootCmd.AddCommand(cmd.NewBackupCmd(), cmd.NewVersionCmd(), cmd.NewRestoreCMD(), cmd.NewCleanupCmd(), cmd.NewShowCmd()) - rootCmd.PersistentFlags().StringVar(&config.LogPath, "log", "br.log", "log path") + rootCmd.AddCommand(cmd.NewBackupCmd(), cmd.NewVersionCmd(), cmd.NewRestoreCmd(), cmd.NewCleanupCmd(), cmd.NewShowCmd()) rootCmd.Execute() } diff --git a/pkg/backup/backup.go b/pkg/backup/backup.go index 033bd15..cc33bae 100644 --- a/pkg/backup/backup.go +++ b/pkg/backup/backup.go @@ -1,408 +1,232 @@ package backup import ( - "encoding/json" - "errors" + "context" "fmt" - _ "os" - "os/exec" + "path" "path/filepath" "strconv" - "strings" - "time" - - _ "github.com/facebook/fbthrift/thrift/lib/go/thrift" - "go.uber.org/zap" - "golang.org/x/net/context" - "golang.org/x/sync/errgroup" + "github.com/google/uuid" + log "github.com/sirupsen/logrus" + pb "github.com/vesoft-inc/nebula-agent/pkg/proto" + "github.com/vesoft-inc/nebula-agent/pkg/storage" + "github.com/vesoft-inc/nebula-br/pkg/clients" 
"github.com/vesoft-inc/nebula-br/pkg/config" - backupCtx "github.com/vesoft-inc/nebula-br/pkg/context" - "github.com/vesoft-inc/nebula-br/pkg/metaclient" - "github.com/vesoft-inc/nebula-br/pkg/remote" - "github.com/vesoft-inc/nebula-br/pkg/storage" "github.com/vesoft-inc/nebula-br/pkg/utils" "github.com/vesoft-inc/nebula-go/v2/nebula" "github.com/vesoft-inc/nebula-go/v2/nebula/meta" ) -var defaultTimeout time.Duration = 120 * time.Second -var tmpDir = "/tmp/" - -type BackupError struct { - msg string - Err error -} - -type spaceInfo struct { - spaceID nebula.GraphSpaceID - checkpointDir string -} - -var LeaderNotFoundError = errors.New("not found leader") -var backupFailed = errors.New("backup failed") - -func (e *BackupError) Error() string { - return e.msg + e.Err.Error() -} - -type backupEntry struct { - SrcPath string - DestUrl string -} -type idPathMap map[string][]backupEntry type Backup struct { - config config.BackupConfig - metaLeader string - backendStorage storage.ExternalStorage - log *zap.Logger - metaFileName string - storageMap map[string]idPathMap - metaMap map[string]idPathMap - storeCtx *backupCtx.Context -} - -func NewBackupClient(cf config.BackupConfig, log *zap.Logger) (*Backup, error) { - local_addr, err := remote.GetAddresstoReachRemote(strings.Split(cf.Meta, ":")[0], cf.User, log) - if err != nil { - log.Error("get local address failed", zap.Error(err)) - return nil, err - } - log.Info("local address", zap.String("address", local_addr)) - var ( - storeCtx backupCtx.Context - backend storage.ExternalStorage - ) - backend, err = storage.NewExternalStorage(cf.BackendUrl, log, cf.MaxConcurrent, cf.CommandArgs, - &storeCtx) - if err != nil { - log.Error("new external storage failed", zap.Error(err)) - return nil, err - } - - b := &Backup{config: cf, log: log, - storageMap: make(map[string]idPathMap), - metaMap: make(map[string]idPathMap), - storeCtx: &storeCtx} - - b.storeCtx.LocalAddr = local_addr - b.storeCtx.Reporter = b - b.backendStorage = 
backend + ctx context.Context + cfg *config.BackupConfig + meta *clients.NebulaMeta - return b, nil + hosts *utils.NebulaHosts + sto storage.ExternalStorage } -func (b *Backup) dropBackup(name []byte) (*meta.ExecResp, error) { - - client := metaclient.NewMetaClient(b.log) - err := client.Open(b.metaLeader) - if err != nil { - return nil, err +func NewBackup(ctx context.Context, cfg *config.BackupConfig) (*Backup, error) { + b := &Backup{ + ctx: context.WithValue(ctx, storage.SessionKey, uuid.NewString()), + cfg: cfg, } - snapshot := meta.NewDropSnapshotReq() - snapshot.Name = name - defer client.Close() - - resp, err := client.DropBackup(snapshot) + var err error + b.meta, err = clients.NewMeta(cfg.MetaAddr) if err != nil { - return nil, err + return nil, fmt.Errorf("create meta client failed: %w", err) } - if resp.GetCode() != nebula.ErrorCode_SUCCEEDED { - return nil, fmt.Errorf("drop backup failed %d", resp.GetCode()) + b.sto, err = storage.New(cfg.Backend) + if err != nil { + return nil, fmt.Errorf("create storage failed: %w", err) } - return resp, nil -} - -func (b *Backup) createBackup() (*meta.CreateBackupResp, error) { - b.metaLeader = b.config.Meta - - for { - client := metaclient.NewMetaClient(b.log) - err := client.Open(b.metaLeader) - if err != nil { - return nil, err - } - - backupReq := meta.NewCreateBackupReq() - defer client.Close() - if len(b.config.SpaceNames) != 0 { - for _, name := range b.config.SpaceNames { - backupReq.Spaces = append(backupReq.Spaces, []byte(name)) - } - } - - resp, err := client.CreateBackup(backupReq) - if err != nil { - return nil, err - } - - if resp.GetCode() != nebula.ErrorCode_E_LEADER_CHANGED && resp.GetCode() != nebula.ErrorCode_SUCCEEDED { - b.log.Error("backup failed", zap.String("error code", resp.GetCode().String())) - return nil, backupFailed - } - - if resp.GetCode() == nebula.ErrorCode_SUCCEEDED { - return resp, nil - } - - leader := resp.GetLeader() - if leader == meta.ExecResp_Leader_DEFAULT { - return nil, 
LeaderNotFoundError - } - - b.log.Info("leader changed", zap.String("leader", leader.String())) - b.metaLeader = metaclient.HostaddrToString(leader) + listRes, err := b.meta.ListCluster() + if err != nil { + return nil, fmt.Errorf("list cluster failed: %w", err) } -} - -func (b *Backup) writeMetadata(meta *meta.BackupMeta) error { - b.metaFileName = tmpDir + string(meta.BackupName[:]) + ".meta" - - var absMetaFiles [][]byte - for _, files := range meta.MetaFiles { - f := filepath.Base(string(files[:])) - absMetaFiles = append(absMetaFiles, []byte(f)) + b.hosts = &utils.NebulaHosts{} + err = b.hosts.LoadFrom(listRes) + if err != nil { + return nil, fmt.Errorf("parse cluster response failed: %w", err) } - meta.MetaFiles = absMetaFiles - - return utils.PutMetaToFile(b.log, meta, b.metaFileName) + return b, nil } -func (b *Backup) BackupCluster() error { - b.log.Info("start backup nebula cluster") - resp, err := b.createBackup() +// upload the meta backup files in host to external uri +// localDir are absolute meta checkpoint folder in host filesystem +// targetUri is external storage's uri, which is meta's root dir, +// has pattern like local://xxx, hdfs://xxx +func (b *Backup) uploadMeta(host *nebula.HostAddr, targetUri string, localDir string) error { + agentAddr, err := b.hosts.GetAgentFor(b.meta.LeaderAddr()) if err != nil { - b.log.Error("backup cluster failed", zap.Error(err)) return err } - - meta := resp.GetMeta() - b.log.Info("response backup meta", - zap.String("backup.meta", metaclient.BackupMetaToString(meta))) - - err = b.uploadAll(meta) + agent, err := clients.NewAgent(b.ctx, agentAddr) if err != nil { - return err + return fmt.Errorf("create agent failed: %w", err) } - return nil -} - -func (b *Backup) execPreUploadMetaCommand(metaDir string) error { - cmdStr := []string{"mkdir", "-p", metaDir} - b.log.Info("exec pre upload meta command", zap.Strings("cmd", cmdStr)) - cmd := exec.Command(cmdStr[0], cmdStr[1:]...) 
- err := cmd.Run() + backend, err := b.sto.GetDir(b.ctx, targetUri) if err != nil { - return err + return fmt.Errorf("get storage backend for %s failed: %w", targetUri, err) + } + req := &pb.UploadFileRequest{ + SourcePath: localDir, + TargetBackend: backend, + Recursively: true, + } + _, err = agent.UploadFile(req) + if err != nil { + return fmt.Errorf("upload file by agent failed: %w", err) } - cmd.Wait() return nil } -func (b *Backup) uploadMeta(g *errgroup.Group, files []string) { - - b.log.Info("start upload meta", zap.String("addr", b.metaLeader)) - ipAddr := strings.Split(b.metaLeader, ":") - b.storeCtx.RemoteAddr = ipAddr[0] - - b.log.Info("will upload meta", zap.Int("sst file count", len(files))) - cmd := b.backendStorage.BackupMetaCommand(files) - - func(addr string, user string, cmd string, log *zap.Logger) { - g.Go(func() error { - client, err := remote.NewClient(addr, user, log) - if err != nil { - return err - } - defer client.Close() - return client.ExecCommandBySSH(cmd) - }) - }(ipAddr[0], b.config.User, cmd, b.log) -} - -func (b *Backup) uploadStorage(g *errgroup.Group, dirs map[string][]spaceInfo) error { - b.log.Info("uploadStorage", zap.Int("dirs length", len(dirs))) - for k, v := range dirs { - b.log.Info("start upload storage", zap.String("addr", k)) - idMap := make(map[string][]string) - for _, info := range v { - idStr := strconv.FormatInt(int64(info.spaceID), 10) - idMap[idStr] = append(idMap[idStr], info.checkpointDir) +func (b *Backup) uploadStorage(hostDirs map[string]map[string][]string, targetUri string) error { + for addrStr, spaceDirs := range hostDirs { + // get storage node's agent + addr, err := utils.ParseAddr(addrStr) + if err != nil { + return err } - - ipAddrs := strings.Split(k, ":") - b.log.Info("uploadStorage idMap", zap.Int("idMap length", len(idMap))) - clients, err := remote.NewClientPool(ipAddrs[0], b.config.User, b.log, b.config.MaxSSHConnections) + agentAddr, err := b.hosts.GetAgentFor(addr) + if err != nil { + return 
err + } + agent, err := clients.NewAgent(b.ctx, agentAddr) if err != nil { - b.log.Error("new clients failed", zap.Error(err)) return err } - i := 0 - b.storeCtx.RemoteAddr = ipAddrs[0] + logger := log.WithField("host", addrStr) + // upload every space in this node + for idStr, dirs := range spaceDirs { + for i, source := range dirs { + // {backupRoot}/{backupName}/data/{addr}/data{0..n}/{spaceId} + target, _ := utils.UriJoin(targetUri, addrStr, fmt.Sprintf("data%d", i), idStr) + backend, err := b.sto.GetDir(b.ctx, target) + if err != nil { + return fmt.Errorf("get storage backend for %s failed: %w", target, err) + } - //We need to limit the number of ssh connections per storage node - for id2, cp := range idMap { - cmds := b.backendStorage.BackupStorageCommand(cp, k, id2) - for _, cmd := range cmds { - if i >= len(clients) { - i = 0 + req := &pb.UploadFileRequest{ + SourcePath: source, + TargetBackend: backend, + Recursively: true, } - client := clients[i] - func(client *remote.Client, cmd string) { - g.Go(func() error { - return client.ExecCommandBySSH(cmd) - }) - }(client, cmd) + _, err = agent.UploadFile(req) + if err != nil { + return fmt.Errorf("upload %s to %s failed:%w", source, target, err) + } + logger.WithField("src", source).WithField("target", target).Info("Upload storage checkpoint successfully") } - i++ } } + return nil } -func (b *Backup) uploadMetaFile() error { - cmdStr := b.backendStorage.BackupMetaFileCommand(b.metaFileName) - b.log.Info("will upload metafile", zap.Strings("cmd", cmdStr)) +func (b *Backup) generateMetaFile(meta *meta.BackupMeta) (string, error) { + tmpMetaPath := filepath.Join(utils.LocalTmpDir, fmt.Sprintf("%s.meta", string(meta.BackupName))) - cmd := exec.Command(cmdStr[0], cmdStr[1:]...) 
- err := cmd.Run() - if err != nil { - return err + var fileNames [][]byte + for _, pathBytes := range meta.MetaFiles { + name := filepath.Base(string(pathBytes[:])) + fileNames = append(fileNames, []byte(name)) } - cmd.Wait() + meta.MetaFiles = fileNames - return nil + return tmpMetaPath, utils.DumpMetaToFile(meta, tmpMetaPath) } -func (b *Backup) execPreCommand(backupName string) error { - b.backendStorage.SetBackupName(backupName) - cmdStr := b.backendStorage.BackupPreCommand() - if cmdStr == nil { - return nil - } - b.log.Info("exec pre command", zap.Strings("cmd", cmdStr)) - cmd := exec.Command(cmdStr[0], cmdStr[1:]...) - err := cmd.Run() +func (b *Backup) Backup() error { + // step2: call the meta service, create backup files in each local + backupRes, err := b.meta.CreateBackup(b.cfg.Spaces) if err != nil { return err } - cmd.Wait() - - return nil -} + backupInfo := backupRes.GetMeta() + logger := log.WithField("name", string(backupInfo.GetBackupName())) + logger.WithField("backup info", utils.StringifyBackup(backupInfo)).Info("Create backup in nebula machine's local") -func (b *Backup) uploadAll(meta *meta.BackupMeta) error { - //upload meta - g, _ := errgroup.WithContext(context.Background()) - - err := b.execPreCommand(string(meta.GetBackupName()[:])) + // step3: ensure root dir + rootUri, err := utils.UriJoin(b.cfg.Backend.Uri(), string(backupInfo.BackupName)) if err != nil { - b.log.Error("exec pre command failed", zap.Error(err)) return err } - - if b.backendStorage.Scheme() == storage.SCHEME_LOCAL { // NB: only local backend need this - err = b.execPreUploadMetaCommand(b.backendStorage.BackupMetaDir()) - if err != nil { - b.log.Error("exec pre uploadmeta command failed", zap.Error(err)) - return err - } + err = b.sto.EnsureDir(b.ctx, rootUri, false) + if err != nil { + return fmt.Errorf("ensure dir %s failed: %w", rootUri, err) } + logger.WithField("root", rootUri).Info("Ensure backup root dir") - var metaFiles []string - for _, f := range 
meta.GetMetaFiles() { - fileName := string(f[:]) - metaFiles = append(metaFiles, string(fileName)) + // step4: upload meta files + metaDir, _ := utils.UriJoin(rootUri, "meta") + if len(backupInfo.GetMetaFiles()) == 0 { + return fmt.Errorf("there are no meta files in backup info") } - b.uploadMeta(g, metaFiles) - //upload storage - storageMap := make(map[string][]spaceInfo) - for k, v := range meta.GetBackupInfo() { - for _, i := range v.GetInfo() { - for _, f := range i.GetInfo() { - dir := string(f.Path) - cpDir := spaceInfo{k, dir} - storageMap[metaclient.HostaddrToString(i.Host)] = append(storageMap[metaclient.HostaddrToString(i.Host)], cpDir) + localMetaDir := path.Dir(string(backupInfo.MetaFiles[0])) + if err = b.uploadMeta(b.meta.LeaderAddr(), metaDir, localMetaDir); err != nil { + return err + } + logger.WithField("meta", metaDir).Info("Upload meta successfully") + + // step5: upload storage files + storageDir, _ := utils.UriJoin(rootUri, "data") + hostDirs := make(map[string]map[string][]string) + // group checkpoint dirs by host and space id + for sid, sb := range backupInfo.GetSpaceBackups() { + idStr := strconv.FormatInt(int64(sid), 10) + for _, hb := range sb.GetHostBackups() { + hostStr := utils.StringifyAddr(hb.GetHost()) + for _, cp := range hb.GetCheckpoints() { + if _, ok := hostDirs[hostStr]; !ok { + hostDirs[hostStr] = make(map[string][]string) + } + + hostDirs[hostStr][idStr] = append(hostDirs[hostStr][idStr], string(cp.GetPath())) } } } - - err = b.uploadStorage(g, storageMap) + err = b.uploadStorage(hostDirs, storageDir) if err != nil { - return err + return fmt.Errorf("upload storage failed: %w", err) } + logger.WithField("data", storageDir).Info("Upload data backup successfully") - err = g.Wait() - if err != nil { - b.log.Error("upload error", zap.Error(err)) + // step6: generate backup meta files and upload + if err := utils.EnsureDir(utils.LocalTmpDir); err != nil { return err } - // write the meta for this backup to local + defer func() { + 
if err := utils.RemoveDir(utils.LocalTmpDir); err != nil { + log.WithError(err).Errorf("Remove tmp dir %s failed", utils.LocalTmpDir) + } + }() - err = b.writeMetadata(meta) + tmpMetaPath, err := b.generateMetaFile(backupInfo) if err != nil { - b.log.Error("write the meta file failed", zap.Error(err)) - return err + return fmt.Errorf("write meta to tmp path failed: %w", err) } - b.log.Info("write meta data finished") - // upload meta file - err = b.uploadMetaFile() + logger.WithField("tmp path", tmpMetaPath).Info("Write meta data to local tmp file successfully") + backupMetaPath, _ := utils.UriJoin(rootUri, filepath.Base(tmpMetaPath)) + err = b.sto.Upload(b.ctx, backupMetaPath, tmpMetaPath, false) if err != nil { - b.log.Error("upload meta file failed", zap.Error(err)) - return err + return fmt.Errorf("upload local tmp file to remote storage %s failed: %w", backupMetaPath, err) } + logger.WithField("remote path", backupMetaPath).Info("Upload tmp backup meta file to remote") - _, err = b.dropBackup(meta.GetBackupName()) + // step7: drop backup files in cluster machine local and local tmp files + err = b.meta.DropBackup(backupInfo.GetBackupName()) if err != nil { - b.log.Error("drop backup failed", zap.Error(err)) + return fmt.Errorf("drop backup %s in cluster local failed: %w", + string(backupInfo.BackupName[:]), err) } - - b.log.Info("backup nebula cluster finished", zap.String("backupName", string(meta.GetBackupName()[:]))) + logger.Info("Drop backup in cluster and local tmp folder successfully") return nil } - -func (b *Backup) ShowSummaries() { - fmt.Printf("==== backup summeries ====\n") - fmt.Printf("localaddr : %s\n", b.storeCtx.LocalAddr) - fmt.Printf("backend.type : %s\n", b.backendStorage.Scheme()) - fmt.Printf("backend.url : %s\n", b.backendStorage.URI()) - fmt.Printf("tgt.meta.leader : %s\n", b.config.Meta) - if b.backendStorage.Scheme() == storage.SCHEME_LOCAL { - // if local, storages' snapshot would be copy to a path at that host. 
- b.showUploadSummaries(&b.metaMap, "tgt.meta.map") - b.showUploadSummaries(&b.storageMap, "tgt.storage.map") - } - fmt.Printf("==========================\n") -} - -func (b *Backup) showUploadSummaries(m *map[string]idPathMap, msg string) { - o, _ := json.MarshalIndent(m, "", " ") - fmt.Printf("--- %s ---\n", msg) - fmt.Printf("%s\n", string(o)) -} - -func (b *Backup) doRecordUploading(m *map[string]idPathMap, spaceId string, host string, paths []string, desturl string) { - if (*m)[host] == nil { - (*m)[host] = make(idPathMap) - } - bes := []backupEntry{} - for _, p := range paths { - bes = append(bes, backupEntry{SrcPath: p, DestUrl: desturl}) - } - (*m)[host][spaceId] = append((*m)[host][spaceId], bes[:]...) -} - -func (b *Backup) StorageUploadingReport(spaceid string, host string, paths []string, desturl string) { - b.doRecordUploading(&b.storageMap, spaceid, host, paths, desturl) -} - -func (b *Backup) MetaUploadingReport(host string, paths []string, desturl string) { - kDefaultSid := "0" - b.doRecordUploading(&b.metaMap, kDefaultSid, host, paths, desturl) -} diff --git a/pkg/cleanup/cleanup.go b/pkg/cleanup/cleanup.go index 0d2ba06..6471cb8 100644 --- a/pkg/cleanup/cleanup.go +++ b/pkg/cleanup/cleanup.go @@ -1,73 +1,81 @@ package cleanup import ( - "errors" + "context" + "fmt" + "github.com/vesoft-inc/nebula-agent/pkg/storage" + "github.com/vesoft-inc/nebula-br/pkg/clients" "github.com/vesoft-inc/nebula-br/pkg/config" - "github.com/vesoft-inc/nebula-br/pkg/metaclient" + "github.com/vesoft-inc/nebula-br/pkg/utils" - "github.com/vesoft-inc/nebula-go/v2/nebula" - "github.com/vesoft-inc/nebula-go/v2/nebula/meta" - "go.uber.org/zap" + log "github.com/sirupsen/logrus" ) type Cleanup struct { - log *zap.Logger - config config.CleanupConfig + ctx context.Context + cfg config.CleanupConfig + client *clients.NebulaMeta + sto storage.ExternalStorage } -var LeaderNotFoundError = errors.New("not found leader") -var CleanupError = errors.New("cleanup failed") - -func 
NewCleanup(config config.CleanupConfig, log *zap.Logger) *Cleanup { - return &Cleanup{log, config} -} +func NewCleanup(ctx context.Context, cfg config.CleanupConfig) (*Cleanup, error) { + sto, err := storage.New(cfg.Backend) + if err != nil { + return nil, fmt.Errorf("create storage for %s failed: %w", cfg.Backend.Uri(), err) + } -func (c *Cleanup) dropBackup() (*meta.ExecResp, error) { - addr := c.config.MetaServer[0] - backupName := []byte(c.config.BackupName[:]) + client, err := clients.NewMeta(cfg.MetaAddr) + if err != nil { + return nil, fmt.Errorf("create meta client failed: %w", err) + } - for { - client := metaclient.NewMetaClient(c.log) - err := client.Open(addr) - if err != nil { - return nil, err - } + return &Cleanup{ + ctx: ctx, + cfg: cfg, + client: client, + sto: sto, + }, nil +} - snapshot := meta.NewDropSnapshotReq() - snapshot.Name = backupName - defer client.Close() +func (c *Cleanup) cleanNebula() error { + err := c.client.DropBackup([]byte(c.cfg.BackupName)) + if err != nil { + return fmt.Errorf("drop backup failed: %w", err) + } + log.Debugf("Drop backup %s successfully", c.cfg.BackupName) - resp, err := client.DropBackup(snapshot) - if err != nil { - return nil, err - } + return nil +} - if resp.GetCode() != nebula.ErrorCode_E_LEADER_CHANGED && resp.GetCode() != nebula.ErrorCode_SUCCEEDED { - c.log.Error("cleanup failed", zap.String("error code", resp.GetCode().String())) - return nil, CleanupError - } +func (c *Cleanup) cleanExternal() error { + backupUri, err := utils.UriJoin(c.cfg.Backend.Uri(), c.cfg.BackupName) + if err != nil { + return err + } - if resp.GetCode() == nebula.ErrorCode_SUCCEEDED { - return resp, nil - } + err = c.sto.RemoveDir(c.ctx, backupUri) + if err != nil { + return fmt.Errorf("remove %s in external storage failed: %w", backupUri, err) + } + return nil +} - leader := resp.GetLeader() - if leader == meta.ExecResp_Leader_DEFAULT { - return nil, LeaderNotFoundError - } +func (c *Cleanup) Clean() error { + logger := 
log.WithField("backup name", c.cfg.BackupName) - c.log.Info("leader changed", zap.String("leader", leader.String())) - addr = metaclient.HostaddrToString(leader) + logger.Info("Start to cleanup data in nebula cluster") + err := c.cleanNebula() + if err != nil { + return fmt.Errorf("clean nebula local data failed: %w", err) } -} -func (c *Cleanup) Run() error { - _, err := c.dropBackup() + logger.Info("Start cleanup data in external storage") + err = c.cleanExternal() if err != nil { - c.log.Error("cleanup failed", zap.Error(err)) - return err + return fmt.Errorf("clean external storage data failed: %w", err) } - c.log.Info("cleanup finished", zap.String("backupname", c.config.BackupName)) + + logger.Info("Clean up backup data successfully") return nil } diff --git a/pkg/clients/agent.go b/pkg/clients/agent.go new file mode 100644 index 0000000..0de0339 --- /dev/null +++ b/pkg/clients/agent.go @@ -0,0 +1,73 @@ +package clients + +import ( + "context" + "fmt" + + agent "github.com/vesoft-inc/nebula-agent/pkg/client" + "github.com/vesoft-inc/nebula-br/pkg/utils" + "github.com/vesoft-inc/nebula-go/v2/nebula" +) + +type NebulaAgent struct { + agent.Client +} + +func NewAgent(ctx context.Context, agentAddr *nebula.HostAddr) (*NebulaAgent, error) { + cfg := &agent.Config{ + Addr: agentAddr, + } + c, err := agent.New(ctx, cfg) + if err != nil { + return nil, err + } + + a := &NebulaAgent{ + Client: c, + } + + return a, nil +} + +type AgentManager struct { + ctx context.Context + agents map[string]*NebulaAgent // group by ip or host + hosts *utils.NebulaHosts +} + +func NewAgentManager(ctx context.Context, hosts *utils.NebulaHosts) *AgentManager { + return &AgentManager{ + ctx: ctx, + agents: make(map[string]*NebulaAgent), + hosts: hosts, + } +} + +func (a *AgentManager) GetAgentFor(serviceAddr *nebula.HostAddr) (*NebulaAgent, error) { + agentAddr, err := a.hosts.GetAgentFor(serviceAddr) + if err != nil { + return nil, fmt.Errorf("get agent address for graph service %s 
failed: %w", + utils.StringifyAddr(agentAddr), err) + } + + return a.GetAgent(agentAddr) +} + +func (a *AgentManager) GetAgent(agentAddr *nebula.HostAddr) (*NebulaAgent, error) { + if agent, ok := a.agents[agentAddr.Host]; ok { + if agent.GetAddr().Host != agentAddr.Host || agent.GetAddr().Port != agentAddr.Port { + return nil, fmt.Errorf("there are two agents, %s and %s, in the same host: %s", + utils.StringifyAddr(agent.GetAddr()), utils.StringifyAddr(agentAddr), agentAddr.Host) + } + + return agent, nil + } + + agent, err := NewAgent(a.ctx, agentAddr) + if err != nil { + return nil, fmt.Errorf("create agent %s failed: %w", utils.StringifyAddr(agentAddr), err) + } + + a.agents[agentAddr.Host] = agent + return agent, nil +} diff --git a/pkg/clients/meta.go b/pkg/clients/meta.go new file mode 100644 index 0000000..5119583 --- /dev/null +++ b/pkg/clients/meta.go @@ -0,0 +1,255 @@ +package clients + +import ( + "fmt" + "time" + + log "github.com/sirupsen/logrus" + "github.com/vesoft-inc/nebula-br/pkg/utils" + "github.com/vesoft-inc/nebula-go/v2/nebula" + "github.com/vesoft-inc/nebula-go/v2/nebula/meta" +) + +type NebulaMeta struct { + client *meta.MetaServiceClient + leaderAddr *nebula.HostAddr +} + +func NewMeta(addrStr string) (*NebulaMeta, error) { + addr, err := utils.ParseAddr(addrStr) + if err != nil { + return nil, err + } + + m := &NebulaMeta{ + leaderAddr: addr, + } + + if m.client, err = connect(addr); err != nil { + return nil, err + } + + return m, nil +} + +func (m *NebulaMeta) LeaderAddr() *nebula.HostAddr { + return m.leaderAddr +} + +func (m *NebulaMeta) reconnect(addr *nebula.HostAddr) error { + if addr == meta.ExecResp_Leader_DEFAULT { + return fmt.Errorf("leader not found when call ListCluster") + } + m.client.Close() + + var err error + c, err := connect(addr) + if err != nil { + return fmt.Errorf("connect to new meta client leader %s failed: %w", + utils.StringifyAddr(addr), err) + } + + m.leaderAddr = addr + m.client = c + return nil +} + +func 
(m *NebulaMeta) ListCluster() (*meta.ListClusterInfoResp, error) { + req := &meta.ListClusterInfoReq{} + + for { + resp, err := m.client.ListCluster(req) + if err != nil { + return nil, fmt.Errorf("list cluster %s failed: %w", utils.StringifyAddr(m.leaderAddr), err) + } + + // retry only when leader change + if resp.GetCode() == nebula.ErrorCode_E_LEADER_CHANGED { + err := m.reconnect(resp.GetLeader()) + if err != nil { + return nil, err + } + continue + } + + // fill the meta dir info + for _, services := range resp.GetHostServices() { + for _, s := range services { + if s.Role == meta.HostRole_META { + dir, err := m.getMetaDirInfo(s.GetAddr()) + if err != nil { + return nil, fmt.Errorf("get meta %s from address failed: %w", + utils.StringifyAddr(s.GetAddr()), err) + } + s.Dir = dir + } + } + } + + return resp, nil + } +} + +func (m *NebulaMeta) CreateBackup(spaces []string) (*meta.CreateBackupResp, error) { + req := meta.NewCreateBackupReq() + if spaces != nil || len(spaces) != 0 { + req.Spaces = make([][]byte, 0, len(spaces)) + for _, space := range spaces { + req.Spaces = append(req.Spaces, []byte(space)) + } + } + + for { + resp, err := m.client.CreateBackup(req) + if err != nil { + return nil, err + } + + if resp.GetCode() == nebula.ErrorCode_E_LEADER_CHANGED { + err := m.reconnect(resp.GetLeader()) + if err != nil { + return nil, err + } + continue + } + + return resp, nil + } + +} + +func (m *NebulaMeta) DropBackup(name []byte) error { + req := meta.NewDropSnapshotReq() + req.Name = name + + for { + resp, err := m.client.DropSnapshot(req) + if err != nil { + return fmt.Errorf("call drop snapshot failed: %w", err) + } + + if resp.GetCode() == nebula.ErrorCode_E_LEADER_CHANGED { + err := m.reconnect(resp.GetLeader()) + if err != nil { + return err + } + continue + } + + if resp.GetCode() == nebula.ErrorCode_SUCCEEDED { + return nil + } + return fmt.Errorf("call drop snapshot failed: %s", resp.GetCode().String()) + } + +} + +func (m *NebulaMeta) GetSpace(space 
[]byte) (*meta.GetSpaceResp, error) { + req := meta.NewGetSpaceReq() + req.SpaceName = space + + for { + resp, err := m.client.GetSpace(req) + if err != nil { + return nil, err + } + + if resp.GetCode() == nebula.ErrorCode_E_LEADER_CHANGED { + err := m.reconnect(resp.GetLeader()) + if err != nil { + return nil, err + } + continue + } + + return resp, nil + } +} + +func (m *NebulaMeta) DropSpace(space []byte, ifExists bool) error { + req := meta.NewDropSpaceReq() + req.SpaceName = space + req.IfExists = ifExists + + for { + resp, err := m.client.DropSpace(req) + if err != nil { + return err + } + + if resp.GetCode() == nebula.ErrorCode_E_LEADER_CHANGED { + err := m.reconnect(resp.GetLeader()) + if err != nil { + return err + } + continue + } + + if resp.GetCode() == nebula.ErrorCode_SUCCEEDED { + return nil + } + return fmt.Errorf("call DropSpace failed: %s", resp.GetCode().String()) + } +} + +// single metad node +func (m *NebulaMeta) RestoreMeta(metaAddr *nebula.HostAddr, hostMap []*meta.HostPair, files []string) error { + byteFiles := make([][]byte, 0, len(files)) + for _, f := range files { + byteFiles = append(byteFiles, []byte(f)) + } + req := meta.NewRestoreMetaReq() + req.Hosts = hostMap + req.Files = byteFiles + + for try := 1; try <= 3; try++ { + client, err := connect(metaAddr) + if err != nil { + log.WithError(err).WithField("addr", utils.StringifyAddr(metaAddr)). 
+ Errorf("connect to metad failed, try times %d", try) + time.Sleep(time.Second * 2) + continue + } + + resp, err := client.RestoreMeta(req) + if err != nil { + log.WithError(err).WithField("req", req).Error("Restore meta failed") + return err + } + + if resp.GetCode() == nebula.ErrorCode_SUCCEEDED { + return nil + } + return fmt.Errorf("call %s:RestoreMeta failed: %s", + utils.StringifyAddr(metaAddr), resp.GetCode().String()) + } + + return fmt.Errorf("try to connect %s 3 times, but failed", utils.StringifyAddr(metaAddr)) +} + +func (m *NebulaMeta) getMetaDirInfo(addr *nebula.HostAddr) (*nebula.DirInfo, error) { + log.WithField("addr", utils.StringifyAddr(addr)).Debug("Try to get dir info from meta service") + c, err := connect(addr) + if err != nil { + return nil, err + } + + defer func() { + e := c.Close() + if e != nil { + log.WithError(e).WithField("host", addr.String()).Error("Close error when get meta dir info.") + } + }() + + req := &meta.GetMetaDirInfoReq{} + resp, err := c.GetMetaDirInfo(req) + if err != nil { + return nil, err + } + + if resp.GetCode() != nebula.ErrorCode_SUCCEEDED { + return nil, fmt.Errorf("get meta dir info failed: %v", resp.GetCode()) + } + + return resp.GetDir(), nil +} diff --git a/pkg/clients/utils.go b/pkg/clients/utils.go new file mode 100644 index 0000000..81af4e8 --- /dev/null +++ b/pkg/clients/utils.go @@ -0,0 +1,37 @@ +package clients + +import ( + "fmt" + "time" + + "github.com/facebook/fbthrift/thrift/lib/go/thrift" + log "github.com/sirupsen/logrus" + "github.com/vesoft-inc/nebula-br/pkg/utils" + "github.com/vesoft-inc/nebula-go/v2/nebula" + "github.com/vesoft-inc/nebula-go/v2/nebula/meta" +) + +const ( + defaultTimeout time.Duration = 120 * time.Second +) + +func connect(metaAddr *nebula.HostAddr) (*meta.MetaServiceClient, error) { + log.WithField("meta address", utils.StringifyAddr(metaAddr)).Info("try to connect meta service") + timeoutOption := thrift.SocketTimeout(defaultTimeout) + addressOption := 
thrift.SocketAddr(utils.StringifyAddr(metaAddr)) + sock, err := thrift.NewSocket(timeoutOption, addressOption) + if err != nil { + return nil, fmt.Errorf("open socket failed: %w", err) + } + + bufferedTranFactory := thrift.NewBufferedTransportFactory(128 << 10) + transport := thrift.NewFramedTransport(bufferedTranFactory.GetTransport(sock)) + pf := thrift.NewBinaryProtocolFactoryDefault() + client := meta.NewMetaServiceClientFactory(transport, pf) + if err := client.CC.Open(); err != nil { + return nil, fmt.Errorf("open meta failed %w", err) + } + + log.WithField("meta address", utils.StringifyAddr(metaAddr)).Info("connect meta server successfully") + return client, nil +} diff --git a/pkg/config/backup.go b/pkg/config/backup.go new file mode 100644 index 0000000..ac58286 --- /dev/null +++ b/pkg/config/backup.go @@ -0,0 +1,44 @@ +package config + +import ( + "fmt" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + pb "github.com/vesoft-inc/nebula-agent/pkg/proto" + "github.com/vesoft-inc/nebula-br/pkg/storage" +) + +func AddBackupFlags(flags *pflag.FlagSet) { + flags.StringArray(FlagSpaces, nil, + `(EXPERIMENTAL)space names. + By this option, user can specify which spaces to backup. Now this feature is still experimental. + If not specified, will backup all spaces. 
+ `) + flags.String(FlagMetaAddr, "", "Specify meta server") + cobra.MarkFlagRequired(flags, FlagMetaAddr) + cobra.MarkFlagRequired(flags, FlagStorage) +} + +type BackupConfig struct { + MetaAddr string + Spaces []string + Backend *pb.Backend // Backend is associated with the root uri +} + +func (b *BackupConfig) ParseFlags(flags *pflag.FlagSet) error { + var err error + b.MetaAddr, err = flags.GetString(FlagMetaAddr) + if err != nil { + return err + } + b.Spaces, err = flags.GetStringArray(FlagSpaces) + if err != nil { + return err + } + b.Backend, err = storage.ParseFromFlags(flags) + if err != nil { + return fmt.Errorf("parse storage flags failed: %w", err) + } + return nil +} diff --git a/pkg/config/cleanup.go b/pkg/config/cleanup.go new file mode 100644 index 0000000..7c7a0c6 --- /dev/null +++ b/pkg/config/cleanup.go @@ -0,0 +1,41 @@ +package config + +import ( + "fmt" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + pb "github.com/vesoft-inc/nebula-agent/pkg/proto" + "github.com/vesoft-inc/nebula-br/pkg/storage" +) + +func AddCleanupFlags(flags *pflag.FlagSet) { + flags.String(FlagMetaAddr, "", "Specify meta server") + flags.String(flagBackupName, "", "Specify backup name") + cobra.MarkFlagRequired(flags, FlagMetaAddr) + cobra.MarkFlagRequired(flags, flagBackupName) + cobra.MarkFlagRequired(flags, FlagStorage) +} + +type CleanupConfig struct { + MetaAddr string + BackupName string + Backend *pb.Backend // Backend is associated with the root uri +} + +func (c *CleanupConfig) ParseFlags(flags *pflag.FlagSet) error { + var err error + c.MetaAddr, err = flags.GetString(FlagMetaAddr) + if err != nil { + return err + } + c.BackupName, err = flags.GetString(flagBackupName) + if err != nil { + return err + } + c.Backend, err = storage.ParseFromFlags(flags) + if err != nil { + return fmt.Errorf("parse storage flags failed: %w", err) + } + return nil +} diff --git a/pkg/config/common.go b/pkg/config/common.go new file mode 100644 index 0000000..063fbf9 --- 
/dev/null +++ b/pkg/config/common.go @@ -0,0 +1,27 @@ +package config + +import ( + "github.com/spf13/pflag" + "github.com/vesoft-inc/nebula-br/pkg/storage" +) + +const ( + FlagStorage = "storage" + FlagMetaAddr = "meta" + FlagSpaces = "spaces" + + FlagLogPath = "log" + + flagBackupName = "name" +) + +func AddCommonFlags(flags *pflag.FlagSet) { + flags.String(FlagLogPath, "br.log", "Specify br detail log path") + storage.AddFlags(flags) +} + +type NodeInfo struct { + Addrs string + RootDir string + DataDir []string +} diff --git a/pkg/config/config.go b/pkg/config/config.go deleted file mode 100644 index a01e061..0000000 --- a/pkg/config/config.go +++ /dev/null @@ -1,38 +0,0 @@ -package config - -type NodeInfo struct { - Addrs string - RootDir string - DataDir []string - User string -} - -type BackupConfig struct { - Meta string - SpaceNames []string - BackendUrl string - MaxSSHConnections int - User string - // Only for OSS for now - MaxConcurrent int - CommandArgs string - Verbose bool -} - -type RestoreConfig struct { - Meta string - BackendUrl string - MaxSSHConnections int - User string - BackupName string - // Only for OSS for now - MaxConcurrent int - CommandArgs string -} - -type CleanupConfig struct { - BackupName string - MetaServer []string -} - -var LogPath string diff --git a/pkg/config/restore.go b/pkg/config/restore.go new file mode 100644 index 0000000..ee2aa1e --- /dev/null +++ b/pkg/config/restore.go @@ -0,0 +1,47 @@ +package config + +import ( + "fmt" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + pb "github.com/vesoft-inc/nebula-agent/pkg/proto" + "github.com/vesoft-inc/nebula-br/pkg/storage" +) + +const ( + flagConcurrency = "concurrency" +) + +func AddRestoreFlags(flags *pflag.FlagSet) { + flags.String(FlagMetaAddr, "", "Specify meta server") + flags.String(flagBackupName, "", "Specify backup name") + flags.Int(flagConcurrency, 5, "Max concurrency for download data") // TODO(spw): not use now + + cobra.MarkFlagRequired(flags, 
FlagMetaAddr) + cobra.MarkFlagRequired(flags, FlagStorage) + cobra.MarkFlagRequired(flags, flagBackupName) +} + +type RestoreConfig struct { + MetaAddr string + BackupName string + Backend *pb.Backend +} + +func (r *RestoreConfig) ParseFlags(flags *pflag.FlagSet) error { + var err error + r.MetaAddr, err = flags.GetString(FlagMetaAddr) + if err != nil { + return err + } + r.BackupName, err = flags.GetString(flagBackupName) + if err != nil { + return err + } + r.Backend, err = storage.ParseFromFlags(flags) + if err != nil { + return fmt.Errorf("parse storage flags failed: %w", err) + } + return nil +} diff --git a/pkg/config/show.go b/pkg/config/show.go new file mode 100644 index 0000000..7a88ea7 --- /dev/null +++ b/pkg/config/show.go @@ -0,0 +1,22 @@ +package config + +import ( + "fmt" + + "github.com/spf13/pflag" + pb "github.com/vesoft-inc/nebula-agent/pkg/proto" + "github.com/vesoft-inc/nebula-br/pkg/storage" +) + +type ShowConfig struct { + Backend *pb.Backend +} + +func (s *ShowConfig) ParseFlags(flags *pflag.FlagSet) error { + var err error + s.Backend, err = storage.ParseFromFlags(flags) + if err != nil { + return fmt.Errorf("parse storage flags failed: %w", err) + } + return nil +} diff --git a/pkg/context/context.go b/pkg/context/context.go deleted file mode 100644 index 1c1fe7d..0000000 --- a/pkg/context/context.go +++ /dev/null @@ -1,21 +0,0 @@ -package context - -import ( - _ "github.com/vesoft-inc/nebula-go/v2/nebula" -) - -type BackendUploadTracker interface { - StorageUploadingReport(spaceid string, host string, paths []string, desturl string) - MetaUploadingReport(host string, paths []string, desturl string) -} - -// NB - not thread-safe -type Context struct { - LocalAddr string // the address of br client - RemoteAddr string // the address of nebula service - Reporter BackendUploadTracker -} - -func NewContext(localaddr string, r BackendUploadTracker) *Context { - return &Context{LocalAddr: localaddr, Reporter: r} -} diff --git a/pkg/log/log.go 
b/pkg/log/log.go index e2c9c29..63407bc 100644 --- a/pkg/log/log.go +++ b/pkg/log/log.go @@ -1,17 +1,34 @@ package log -import "go.uber.org/zap" +import ( + "io" + "os" -type Logger struct { - path string - *zap.Logger -} + "github.com/sirupsen/logrus" + "github.com/spf13/pflag" + "github.com/vesoft-inc/nebula-br/pkg/config" +) + +func SetLog(flags *pflag.FlagSet) error { + logrus.SetReportCaller(true) + logrus.SetFormatter(&logrus.JSONFormatter{ + TimestampFormat: "2006-01-02T15:04:05.000Z", + }) + logrus.SetLevel(logrus.InfoLevel) + + path, err := flags.GetString(config.FlagLogPath) + if err != nil { + return err + } -func NewLogger(logPath string) (*Logger, error) { - cfg := zap.NewProductionConfig() - cfg.OutputPaths = []string{ - logPath, + file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) + if err != nil { + logrus.WithError(err).WithField("file", path).Error("Create log path failed") + return err } - log, err := cfg.Build() - return &Logger{logPath, log}, err + + mw := io.MultiWriter(os.Stdout, file) + logrus.SetOutput(mw) + + return nil } diff --git a/pkg/metaclient/meta.go b/pkg/metaclient/meta.go deleted file mode 100644 index 2543cb0..0000000 --- a/pkg/metaclient/meta.go +++ /dev/null @@ -1,109 +0,0 @@ -package metaclient - -import ( - "fmt" - "time" - - "github.com/facebook/fbthrift/thrift/lib/go/thrift" - "github.com/vesoft-inc/nebula-go/v2/nebula/meta" - "go.uber.org/zap" -) - -type MetaClient struct { - client *meta.MetaServiceClient - log *zap.Logger -} - -var defaultTimeout time.Duration = 120 * time.Second - -func NewMetaClient(log *zap.Logger) *MetaClient { - return &MetaClient{log: log} -} - -func (m *MetaClient) RestoreMeta(req *meta.RestoreMetaReq) (*meta.ExecResp, error) { - if m.client == nil { - return nil, fmt.Errorf("client not open") - } - return m.client.RestoreMeta(req) -} - -func (m *MetaClient) CreateBackup(req *meta.CreateBackupReq) (*meta.CreateBackupResp, error) { - if m.client == nil { - return nil, 
fmt.Errorf("client not open") - } - return m.client.CreateBackup(req) -} - -func (m *MetaClient) DropBackup(req *meta.DropSnapshotReq) (*meta.ExecResp, error) { - if m.client == nil { - return nil, fmt.Errorf("client not open") - } - return m.client.DropSnapshot(req) -} - -func (m *MetaClient) ListCluster(req *meta.ListClusterInfoReq) (*meta.ListClusterInfoResp, error) { - if m.client == nil { - return nil, fmt.Errorf("client not open") - } - return m.client.ListCluster(req) -} - -func (m *MetaClient) ListMetaDir(req *meta.GetMetaDirInfoReq) (*meta.GetMetaDirInfoResp, error) { - if m.client == nil { - return nil, fmt.Errorf("client not open") - } - return m.client.GetMetaDirInfo(req) -} - -func (m *MetaClient) DropSpace(req *meta.DropSpaceReq) (*meta.ExecResp, error) { - if m.client == nil { - return nil, fmt.Errorf("client not open") - } - return m.client.DropSpace(req) -} - -func (m *MetaClient) GetSpaceInfo(req *meta.GetSpaceReq) (*meta.GetSpaceResp, error) { - if m.client == nil { - return nil, fmt.Errorf("client not open") - } - return m.client.GetSpace(req) -} - -func (m *MetaClient) Open(addr string) error { - m.log.Info("start open meta", zap.String("addr", addr)) - - if m.client != nil { - if err := m.client.CC.Close(); err != nil { - m.log.Warn("close backup falied", zap.Error(err)) - } - } - - timeoutOption := thrift.SocketTimeout(defaultTimeout) - addressOption := thrift.SocketAddr(addr) - sock, err := thrift.NewSocket(timeoutOption, addressOption) - if err != nil { - m.log.Error("open socket failed", zap.Error(err), zap.String("addr", addr)) - return err - } - - bufferedTranFactory := thrift.NewBufferedTransportFactory(128 << 10) - transport := thrift.NewFramedTransport(bufferedTranFactory.GetTransport(sock)) - - pf := thrift.NewBinaryProtocolFactoryDefault() - client := meta.NewMetaServiceClientFactory(transport, pf) - if err := client.CC.Open(); err != nil { - m.log.Error("open meta failed", zap.Error(err), zap.String("addr", addr)) - return err - } 
- m.client = client - return nil -} - -func (m *MetaClient) Close() error { - if m.client != nil { - if err := m.client.CC.Close(); err != nil { - return err - } - } - return nil -} diff --git a/pkg/metaclient/meta_test.go b/pkg/metaclient/meta_test.go deleted file mode 100644 index 75d5c98..0000000 --- a/pkg/metaclient/meta_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package metaclient - -import ( - "testing" - "time" - - "github.com/facebook/fbthrift/thrift/lib/go/thrift" - "github.com/stretchr/testify/assert" - "github.com/vesoft-inc/nebula-go/v2/nebula/meta" - "go.uber.org/zap" -) - -func TestOpen(t *testing.T) { - logger, _ := zap.NewProduction() - - assert := assert.New(t) - addr := "127.0.0.1:0" - - sock, err := thrift.NewServerSocket(addr) - assert.Nil(err) - - var handler meta.MetaService - processor := meta.NewMetaServiceProcessor(handler) - server := thrift.NewSimpleServerContext(processor, sock) - go server.Serve() - time.Sleep(2 * time.Second) - - metaClient := NewMetaClient(logger) - defer metaClient.Close() - backupReq := meta.NewCreateBackupReq() - - _, err = metaClient.CreateBackup(backupReq) - assert.NotNil(err) - err = metaClient.Open(sock.Addr().String()) - assert.Nil(err) - server.Stop() -} diff --git a/pkg/metaclient/util.go b/pkg/metaclient/util.go deleted file mode 100644 index 8d8f5ce..0000000 --- a/pkg/metaclient/util.go +++ /dev/null @@ -1,29 +0,0 @@ -package metaclient - -import ( - "encoding/json" - "strconv" - - "github.com/vesoft-inc/nebula-go/v2/nebula" - "github.com/vesoft-inc/nebula-go/v2/nebula/meta" -) - -func HostaddrToString(host *nebula.HostAddr) string { - return host.Host + ":" + strconv.Itoa(int(host.Port)) -} - -func BackupMetaToString(m *meta.BackupMeta) string { - mstr, err := json.Marshal(m) - if err != nil { - return "" - } - return string(mstr) -} - -func ListClusterInfoRespToString(m *meta.ListClusterInfoResp) string { - mstr, err := json.Marshal(m) - if err != nil { - return "" - } - return string(mstr) -} diff --git 
a/pkg/metaclient/util_test.go b/pkg/metaclient/util_test.go deleted file mode 100644 index d6c2dca..0000000 --- a/pkg/metaclient/util_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package metaclient - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/vesoft-inc/nebula-go/v2/nebula" -) - -func TestHostaddrToString(t *testing.T) { - assert := assert.New(t) - host := nebula.NewHostAddr() - host.Host = "192.168.8.1" - host.Port = 80 - - hostStr := HostaddrToString(host) - assert.Equal(hostStr, "192.168.8.1:80") -} diff --git a/pkg/remote/ssh.go b/pkg/remote/ssh.go deleted file mode 100644 index f2cc361..0000000 --- a/pkg/remote/ssh.go +++ /dev/null @@ -1,136 +0,0 @@ -package remote - -import ( - "bytes" - "context" - "io/ioutil" - "net" - "os" - "strings" - "time" - - "github.com/vesoft-inc/nebula-br/pkg/config" - "go.uber.org/zap" - "golang.org/x/crypto/ssh" - "golang.org/x/sync/errgroup" -) - -type Client struct { - client *ssh.Client - addr string - user string - log *zap.Logger -} - -func NewClient(addr string, user string, log *zap.Logger) (*Client, error) { - key, err := ioutil.ReadFile(os.Getenv("HOME") + "/.ssh/id_rsa") - if err != nil { - log.Error("unable to read private key", zap.Error(err)) - return nil, err - } - - // Create the Signer for this private key. 
- signer, err := ssh.ParsePrivateKey(key) - if err != nil { - log.Error("unable to parse private key", zap.Error(err)) - return nil, err - } - config := &ssh.ClientConfig{ - User: user, - HostKeyCallback: ssh.InsecureIgnoreHostKey(), - Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)}, - } - - retry := 0 - for retry < 3 { - var client *ssh.Client - client, err = ssh.Dial("tcp", net.JoinHostPort(addr, "22"), config) - if err != nil { - log.Error("unable to connect host, will retry", zap.Int("attemp", retry), zap.Error(err), zap.String("host", addr), zap.String("user", user)) - time.Sleep(time.Second * 1) - retry += 1 - continue - } - return &Client{client, addr, user, log}, nil - - } - return nil, err -} - -func NewClientPool(addr string, user string, log *zap.Logger, count int) ([]*Client, error) { - var clients []*Client - for i := 0; i < count; i++ { - client, err := NewClient(addr, user, log) - if err != nil { - for _, c := range clients { - c.Close() - } - return nil, err - } - clients = append(clients, client) - } - - return clients, nil -} - -func GetAddresstoReachRemote(addr string, user string, log *zap.Logger) (string, error) { - if cli, err := NewClient(addr, user, log); err == nil { - log.Info("succeed to reach remote", zap.String("addr of local", cli.client.Conn.LocalAddr().String())) - return strings.Split(cli.client.Conn.LocalAddr().String(), ":")[0], nil - } else { - return "", err - } -} - -func (c *Client) Close() { - c.client.Close() -} - -func (c *Client) newSession() (*ssh.Session, error) { - session, err := c.client.NewSession() - if err != nil { - c.log.Error("new session failed", zap.Error(err)) - return nil, err - } - return session, nil -} - -func (c *Client) ExecCommandBySSH(cmd string) error { - session, err := c.newSession() - if err != nil { - return err - } - defer session.Close() - c.log.Info("ssh will exec", zap.String("addr", c.addr), zap.String("cmd", cmd), zap.String("user", c.user)) - var stdoutBuf bytes.Buffer - session.Stdout = 
&stdoutBuf - - err = session.Run(cmd) - if err != nil { - c.log.Error("ssh run failed", zap.Error(err), zap.String("addr", c.addr), zap.String("cmd", cmd)) - return err - } - c.log.Info("Command execution completed", zap.String("addr", c.addr), zap.String("cmd", cmd)) - return nil -} - -func CheckCommand(checkCommand string, nodes []config.NodeInfo, log *zap.Logger) error { - g, _ := errgroup.WithContext(context.Background()) - for _, node := range nodes { - addr := node.Addrs - ipAddrs := strings.Split(addr, ":") - user := node.User - client, err := NewClient(ipAddrs[0], user, log) - if err != nil { - return err - } - g.Go(func() error { return client.ExecCommandBySSH(checkCommand) }) - } - - err := g.Wait() - if err != nil { - return err - } - - return nil -} diff --git a/pkg/remote/ssh_test.go b/pkg/remote/ssh_test.go deleted file mode 100644 index 122fbf6..0000000 --- a/pkg/remote/ssh_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package remote - -import ( - "flag" - "testing" - - "github.com/stretchr/testify/assert" - "go.uber.org/zap" - _ "golang.org/x/crypto/ssh" -) - -var remoteAddr = flag.String("addr", "", "remote ssh addr for test") -var remoteUser = flag.String("user", "", "remote user for test") - -func TestClient(t *testing.T) { - ast := assert.New(t) - if *remoteAddr == "" || *remoteUser == "" { - t.Log("addr and user should be provided!") - return - } - - logger, _ := zap.NewProduction() - cli, err := NewClient(*remoteAddr, *remoteUser, logger) - ast.Nil(err) - - t.Logf("ssh user: %s", cli.client.Conn.User()) - t.Logf("local addr: %s, remote addr: %s", - cli.client.Conn.LocalAddr().String(), - cli.client.Conn.RemoteAddr().String()) -} - -func TestGetLocalAddress(t *testing.T) { - ast := assert.New(t) - if *remoteAddr == "" || *remoteUser == "" { - t.Log("addr and user should be provided!") - return - } - logger, _ := zap.NewProduction() - laddr, err := GetAddresstoReachRemote(*remoteAddr, *remoteUser, logger) - ast.Nil(err) - - t.Logf("local addr: %s", 
laddr) -} diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go index 8f90401..9f3450c 100644 --- a/pkg/restore/restore.go +++ b/pkg/restore/restore.go @@ -2,791 +2,595 @@ package restore import ( "context" - "errors" "fmt" _ "os" - "os/exec" + "path/filepath" + "reflect" "sort" - "strconv" - "strings" "time" - _ "github.com/facebook/fbthrift/thrift/lib/go/thrift" - "github.com/scylladb/go-set/strset" + log "github.com/sirupsen/logrus" + "github.com/vesoft-inc/nebula-agent/pkg/storage" + "github.com/vesoft-inc/nebula-br/pkg/clients" "github.com/vesoft-inc/nebula-br/pkg/config" - backupCtx "github.com/vesoft-inc/nebula-br/pkg/context" - "github.com/vesoft-inc/nebula-br/pkg/metaclient" - "github.com/vesoft-inc/nebula-br/pkg/remote" - "github.com/vesoft-inc/nebula-br/pkg/storage" "github.com/vesoft-inc/nebula-br/pkg/utils" + pb "github.com/vesoft-inc/nebula-agent/pkg/proto" "github.com/vesoft-inc/nebula-go/v2/nebula" "github.com/vesoft-inc/nebula-go/v2/nebula/meta" - - "go.uber.org/zap" - "golang.org/x/sync/errgroup" ) -type Restore struct { - config config.RestoreConfig - backend storage.ExternalStorage - log *zap.Logger - metaLeader string - client *metaclient.MetaClient - storageNodes []config.NodeInfo - metaNodes []config.NodeInfo - metaFileName string - ctx *backupCtx.Context - backSuffix string +func GetBackupSuffix() string { + return fmt.Sprintf("_old_%d", time.Now().Unix()) } -type spaceInfo struct { - spaceID nebula.GraphSpaceID - cpDir string +type Restore struct { + ctx context.Context + cfg *config.RestoreConfig + sto storage.ExternalStorage + hosts *utils.NebulaHosts + meta *clients.NebulaMeta + agentMgr *clients.AgentManager + + rootUri string + backupName string + backSuffix string } -var LeaderNotFoundError = errors.New("not found leader") -var restoreFailed = errors.New("restore failed") -var listClusterFailed = errors.New("list cluster failed") -var spaceNotMatching = errors.New("Space mismatch") -var dropSpaceFailed = errors.New("drop space 
failed") -var getSpaceFailed = errors.New("get space failed") - -func NewRestore(config config.RestoreConfig, log *zap.Logger) (*Restore, error) { - local_addr, err := remote.GetAddresstoReachRemote(strings.Split(config.Meta, ":")[0], config.User, log) +func NewRestore(ctx context.Context, cfg *config.RestoreConfig) (*Restore, error) { + sto, err := storage.New(cfg.Backend) if err != nil { - log.Error("get local address failed", zap.Error(err)) - return nil, err + return nil, fmt.Errorf("create storage failed: %w", err) } - log.Info("local address", zap.String("address", local_addr)) - ctx := backupCtx.NewContext(local_addr, nil) + client, err := clients.NewMeta(cfg.MetaAddr) + if err != nil { + return nil, fmt.Errorf("create meta client failed: %w", err) + } - backend, err := storage.NewExternalStorage(config.BackendUrl, log, config.MaxConcurrent, config.CommandArgs, ctx) + listRes, err := client.ListCluster() if err != nil { - log.Error("new external storage failed", zap.Error(err)) - return nil, err + return nil, fmt.Errorf("list cluster failed: %w", err) } - backend.SetBackupName(config.BackupName) - return &Restore{config: config, log: log, backend: backend, ctx: ctx}, nil + hosts := &utils.NebulaHosts{} + err = hosts.LoadFrom(listRes) + if err != nil { + return nil, fmt.Errorf("parse cluster response failed: %w", err) + } + + return &Restore{ + ctx: ctx, + cfg: cfg, + sto: sto, + meta: client, + hosts: hosts, + agentMgr: clients.NewAgentManager(ctx, hosts), + rootUri: cfg.Backend.Uri(), + backupName: cfg.BackupName, + }, nil } func (r *Restore) checkPhysicalTopology(info map[nebula.GraphSpaceID]*meta.SpaceBackupInfo) error { - s := strset.New() - maxInfoLen := 0 - for _, v := range info { - for _, i := range v.Info { - s.Add(metaclient.HostaddrToString(i.Host)) - if len(i.Info) > maxInfoLen { - maxInfoLen = len(i.Info) + var ( + backupPaths = make(map[int]int) + backupStorages = make(map[string]bool) + ) + + for _, space := range info { + for _, host := range 
space.GetHostBackups() { + if _, ok := backupStorages[utils.StringifyAddr(host.GetHost())]; !ok { + pathCnt := len(host.GetCheckpoints()) + backupPaths[pathCnt]++ } + backupStorages[utils.StringifyAddr(host.GetHost())] = true } } - - if s.Size() > len(r.storageNodes) { - return fmt.Errorf("The physical topology of storage must be consistent") + if r.hosts.StorageCount() != len(backupStorages) { + return fmt.Errorf("the physical topology of storage count must be consistent, cluster: %d, backup: %d", + r.hosts.StorageCount(), len(backupStorages)) } - if maxInfoLen != len(r.storageNodes[0].DataDir) { - return fmt.Errorf("The number of data directories for storage must be the same") + clusterPaths := r.hosts.StoragePaths() + if !reflect.DeepEqual(backupPaths, clusterPaths) { + log.WithField("backup", backupPaths).WithField("cluster", clusterPaths).Error("Path distribution is not consistent") + return fmt.Errorf("the physical topology is not consistent, path distribution is not consistent") } return nil } -func (r *Restore) check() error { - nodes := append(r.metaNodes, r.storageNodes...) - command := r.backend.CheckCommand() - return remote.CheckCommand(command, nodes, r.log) -} +func (r *Restore) checkAndDropSpaces(info map[nebula.GraphSpaceID]*meta.SpaceBackupInfo) error { + for sid, backup := range info { + resp, err := r.meta.GetSpace(backup.Space.SpaceName) + if err != nil { + return fmt.Errorf("get info of space %s failed: %w", string(backup.Space.SpaceName), err) + } -func (r *Restore) downloadMetaFile() error { - r.metaFileName = r.config.BackupName + ".meta" - cmdStr := r.backend.RestoreMetaFileCommand(r.metaFileName, "/tmp/") - r.log.Info("download metafile", zap.Strings("cmd", cmdStr)) - cmd := exec.Command(cmdStr[0], cmdStr[1:]...) 
- err := cmd.Run() - if err != nil { - return err + if resp.GetCode() == nebula.ErrorCode_E_SPACE_NOT_FOUND { + continue + } + if resp.GetCode() == nebula.ErrorCode_SUCCEEDED { + if resp.Item.SpaceID != sid { + return fmt.Errorf("space to resotre already exist and the space id is not consistent, name: %s, backup: %d, cluster: %d", + string(backup.Space.SpaceName), sid, resp.Item.SpaceID) + } + } else { + return fmt.Errorf("get info of space %s failed: %s", + string(backup.Space.SpaceName), resp.GetCode().String()) + } + } + + for _, backup := range info { + err := r.meta.DropSpace(backup.Space.SpaceName, true) + if err != nil { + return fmt.Errorf("drop space %s failed: %w", string(backup.Space.SpaceName), err) + } } - cmd.Wait() return nil } -func (r *Restore) restoreMetaFile() (*meta.BackupMeta, error) { - filename := "/tmp/" + r.metaFileName // downloaded file before - m, err := utils.GetMetaFromFile(r.log, filename) - if m == nil { - r.log.Error("failed to get meta", zap.String("file", filename), - zap.Error(err)) - } - return m, err -} +func (r *Restore) backupOriginal(allspaces bool) error { + r.backSuffix = GetBackupSuffix() -func (r *Restore) downloadMeta(g *errgroup.Group, file []string) map[string][][]byte { - sstFiles := make(map[string][][]byte) - for _, n := range r.metaNodes { - ipAddr := strings.Split(n.Addrs, ":") - r.ctx.RemoteAddr = ipAddr[0] - - cmd, files := r.backend.RestoreMetaCommand(file, n.DataDir[0]) - - func(addr string, user string, cmd string, log *zap.Logger) { - g.Go(func() error { - client, err := remote.NewClient(addr, user, log) - if err != nil { - return err - } - defer client.Close() - return client.ExecCommandBySSH(cmd) - }) - }(ipAddr[0], n.User, cmd, r.log) - var byteFile [][]byte - for _, f := range files { - byteFile = append(byteFile, []byte(f)) - } - sstFiles[n.Addrs] = byteFile - } - return sstFiles -} + for _, s := range r.hosts.GetStorages() { + agent, err := r.agentMgr.GetAgentFor(s.GetAddr()) + if err != nil { + 
return fmt.Errorf("get agent for storaged %s failed: %w", + utils.StringifyAddr(s.GetAddr()), err) + } -func (r *Restore) downloadStorage(g *errgroup.Group, info map[nebula.GraphSpaceID]*meta.SpaceBackupInfo) map[string]string { - idMap := make(map[string][]string) - var cpHosts []string - for gid, bInfo := range info { - for _, i := range bInfo.Info { - idStr := strconv.FormatInt(int64(gid), 10) - if idMap[metaclient.HostaddrToString(i.Host)] == nil { - cpHosts = append(cpHosts, metaclient.HostaddrToString(i.Host)) + logger := log.WithField("addr", utils.StringifyAddr(s.GetAddr())) + for _, d := range s.Dir.Data { + opath := filepath.Join(string(d), "nebula") + bpath := fmt.Sprintf("%s%s", opath, r.backSuffix) + req := &pb.MoveDirRequest{ + SrcPath: opath, + DstPath: bpath, + } + _, err = agent.MoveDir(req) + if err != nil { + return fmt.Errorf("move dir from %s to %s failed: %w", opath, bpath, err) } - idMap[metaclient.HostaddrToString(i.Host)] = append(idMap[metaclient.HostaddrToString(i.Host)], idStr) - } - } - - for i, osvr := range cpHosts { - r.log.Sugar().Infof( - "storage server moved from old: %s to new: %s, contained graphspaceid: %v", - osvr, - r.storageNodes[i].Addrs, - idMap[osvr]) - } - - sort.Strings(cpHosts) - storageIPmap := make(map[string]string) - for i, ip := range cpHosts { - ids := idMap[ip] - sNode := r.storageNodes[i] - r.log.Info("download", zap.String("ip", ip), zap.String("storage", sNode.Addrs)) - var nebulaDirs []string - for _, d := range sNode.DataDir { - nebulaDirs = append(nebulaDirs, d+"/nebula") - } - r.ctx.RemoteAddr = ip - cmds := r.backend.RestoreStorageCommand(ip, ids, nebulaDirs) - addr := strings.Split(sNode.Addrs, ":") - if ip != sNode.Addrs { - storageIPmap[ip] = sNode.Addrs - } - for _, cmd := range cmds { - func(addr string, user string, cmd string, log *zap.Logger) { - g.Go(func() error { - client, err := remote.NewClient(addr, user, log) - if err != nil { - return err - } - defer client.Close() - return 
client.ExecCommandBySSH(cmd) - }) - }(addr[0], sNode.User, cmd, r.log) - } - } - return storageIPmap -} -func stringToHostAddr(host string) (*nebula.HostAddr, error) { - ipAddr := strings.Split(host, ":") - port, err := strconv.ParseInt(ipAddr[1], 10, 32) - if err != nil { - return nil, err + logger.WithField("origin path", opath). + WithField("backup path", bpath). + Info("Backup origin storage data path successfully") + } } - return &nebula.HostAddr{ipAddr[0], nebula.Port(port)}, nil -} -func sendRestoreMeta(addr string, files [][]byte, hostMap []*meta.HostPair, log *zap.Logger) error { + if allspaces { + for _, m := range r.hosts.GetMetas() { + agent, err := r.agentMgr.GetAgentFor(m.GetAddr()) + if err != nil { + return fmt.Errorf("get agent for metad %s failed: %w", + utils.StringifyAddr(m.GetAddr()), err) + } - // retry 3 times if restore failed - count := 3 - for { - if count == 0 { - return restoreFailed - } - client := metaclient.NewMetaClient(log) - err := client.Open(addr) - if err != nil { - log.Error("open meta failed", zap.Error(err), zap.String("addr", addr)) - time.Sleep(time.Second * 2) - continue - } - defer client.Close() + if len(m.Dir.Data) != 1 { + return fmt.Errorf("meta service: %s should only have one data dir, but %d", + utils.StringifyAddr(m.GetAddr()), len(m.Dir.Data)) + } - restoreReq := meta.NewRestoreMetaReq() - restoreReq.Hosts = hostMap - restoreReq.Files = files + opath := fmt.Sprintf("%s/nebula", string(m.Dir.Data[0])) + bpath := fmt.Sprintf("%s%s", opath, r.backSuffix) - resp, err := client.RestoreMeta(restoreReq) - if err != nil { - // maybe we should retry - log.Error("restore failed", zap.Error(err), zap.String("addr", addr)) - time.Sleep(time.Second * 2) - count-- - continue - } + req := &pb.MoveDirRequest{ + SrcPath: opath, + DstPath: bpath, + } + _, err = agent.MoveDir(req) + if err != nil { + return fmt.Errorf("move dir from %s to %s failed: %w", opath, bpath, err) + } - if resp.GetCode() != nebula.ErrorCode_SUCCEEDED { - 
log.Error("restore failed", zap.String("error code", resp.GetCode().String()), zap.String("addr", addr)) - time.Sleep(time.Second * 2) - count-- - continue + log.WithField("addr", utils.StringifyAddr(m.GetAddr())). + WithField("origin path", opath). + WithField("backup path", bpath). + Info("Backup origin meta data path successfully") } - log.Info("restore succeeded", zap.String("addr", addr)) - return nil + } + return nil } -func (r *Restore) restoreMeta(sstFiles map[string][][]byte, storageIDMap map[string]string) error { - r.log.Info("restoreMeta") - var hostMap []*meta.HostPair +func (r *Restore) downloadMeta() error { + // {backupRoot}/{backupName}/meta/*.sst + externalUri, _ := utils.UriJoin(r.rootUri, r.backupName, "meta") + backend, err := r.sto.GetDir(r.ctx, externalUri) + if err != nil { + return fmt.Errorf("get storage backend for %s failed: %w", externalUri, err) + } - for k, v := range storageIDMap { - fromAddr, err := stringToHostAddr(k) + // download meta backup files to every meta service + for _, s := range r.hosts.GetMetas() { + agent, err := r.agentMgr.GetAgentFor(s.GetAddr()) if err != nil { - return err + return fmt.Errorf("get agent for metad %s failed: %w", + utils.StringifyAddr(s.GetAddr()), err) + } + + // meta kv data path: {nebulaData}/meta + localDir := string(s.Dir.Data[0]) + req := &pb.DownloadFileRequest{ + SourceBackend: backend, + TargetPath: localDir, + Recursively: true, } - toAddr, err := stringToHostAddr(v) + _, err = agent.DownloadFile(req) if err != nil { - return err + return fmt.Errorf("download meta files from %s to %s failed: %w", externalUri, localDir, err) } - - r.log.Info("restoreMeta host mapping", zap.String("fromAddr", fromAddr.String()), zap.String("toAddr", toAddr.String())) - pair := &meta.HostPair{fromAddr, toAddr} - hostMap = append(hostMap, pair) - } - r.log.Info("restoreMeta2", zap.Int("metaNode len:", len(r.metaNodes))) - - g, _ := errgroup.WithContext(context.Background()) - for _, n := range r.metaNodes { - 
r.log.Info("will restore meta", zap.String("addr", n.Addrs)) - addr := n.Addrs - func(addr string, files [][]byte, hostMap []*meta.HostPair, log *zap.Logger) { - g.Go(func() error { return sendRestoreMeta(addr, files, hostMap, r.log) }) - }(addr, sstFiles[n.Addrs], hostMap, r.log) } - r.log.Info("restoreMeta3") - err := g.Wait() - if err != nil { - return err - } return nil } -func remoteCommandConcurrentRunner(g *errgroup.Group, addr string, user string, cmd string, log *zap.Logger) { - if cmd == "" { - log.Warn("cmd is empty", zap.Stack("empty cmd stack trace")) - return - } - runner := func() error { - client, err := remote.NewClient(addr, user, log) - if err != nil { - return err - } - defer client.Close() - return client.ExecCommandBySSH(cmd) - } - g.Go(runner) -} - -func (r *Restore) cleanupOriginalBackup() error { - g, _ := errgroup.WithContext(context.Background()) - for _, node := range r.storageNodes { - for _, d := range node.DataDir { - orgpath := d + "/nebula" - bakpath := orgpath + r.backSuffix +func (r *Restore) downloadStorage(backup *meta.BackupMeta) (map[string]string, error) { + // TODO(spw): only support same ip now, by sorting address + // could match by label(or id) in the future, now suppose the label is ip. 
- cmd := r.backend.RestoreStoragePostCommand(bakpath) - ipAddr := strings.Split(node.Addrs, ":")[0] - remoteCommandConcurrentRunner(g, ipAddr, node.User, cmd, r.log) + // current cluster storage serivce list + currList := r.hosts.GetStorages() + sort.Slice(currList, func(i, j int) bool { + if currList[i].Addr.Host != currList[j].Addr.Host { + return currList[i].Addr.Host < currList[j].Addr.Host } - } - - for _, node := range r.metaNodes { - for _, d := range node.DataDir { - orgpath := d + "/nebula" - bakpath := orgpath + r.backSuffix + return currList[i].Addr.Port < currList[j].Addr.Port + }) - cmd := r.backend.RestoreMetaPostCommand(bakpath) - ipAddr := strings.Split(node.Addrs, ":")[0] - remoteCommandConcurrentRunner(g, ipAddr, node.User, cmd, r.log) + // previous backup storage service list + prevMap := make(map[string]*nebula.HostAddr) + for _, sb := range backup.SpaceBackups { + for _, hb := range sb.HostBackups { + prevMap[utils.StringifyAddr(hb.GetHost())] = hb.GetHost() } } - err := g.Wait() - if err != nil { - return err + prevList := make([]*nebula.HostAddr, 0, len(prevMap)) + for _, addr := range prevMap { + prevList = append(prevList, addr) } - return nil -} - -func (r *Restore) backupOriginal() ([]string, error) { - bak_resources_urls := []string{} - g, _ := errgroup.WithContext(context.Background()) - - for _, node := range r.storageNodes { - for _, d := range node.DataDir { - orgpath := d + "/nebula" - bakpath := orgpath + r.backSuffix + sort.Slice(prevList, func(i, j int) bool { + if prevList[i].Host != prevList[j].Host { + return prevList[i].Host < prevList[j].Host + } + return prevList[i].Port < prevList[j].Port + }) - cmd := r.backend.RestoreStoragePreCommand(orgpath, bakpath) - ipAddr := strings.Split(node.Addrs, ":")[0] - remoteCommandConcurrentRunner(g, ipAddr, node.User, cmd, r.log) - bak_resources_urls = append(bak_resources_urls, "STORAGE_"+ipAddr+":"+bakpath) + // download from previous to current one host by another + serviceMap := 
make(map[string]string) + // {backupRoot}/{backupName}/data/{addr}/data{0..n}/ + storageUri, _ := utils.UriJoin(r.rootUri, r.backupName, "data") + for idx, s := range currList { + agent, err := r.agentMgr.GetAgentFor(s.GetAddr()) + if err != nil { + return nil, fmt.Errorf("get agent for storaged %s failed: %w", + utils.StringifyAddr(s.GetAddr()), err) } - } - for _, node := range r.metaNodes { - for _, d := range node.DataDir { - orgpath := d + "/nebula" - bakpath := orgpath + r.backSuffix + logger := log.WithField("addr", utils.StringifyAddr(s.GetAddr())) + for i, d := range s.Dir.Data { + // {backupRoot}/{backupName}/data/{addr}/data{0..n}/ + externalUri, _ := utils.UriJoin(storageUri, utils.StringifyAddr(prevList[idx]), fmt.Sprintf("data%d", i)) + backend, err := r.sto.GetDir(r.ctx, externalUri) + if err != nil { + return nil, fmt.Errorf("get storage backend for %s failed: %w", externalUri, err) + } + // {nebulaDataPath}/storage/nebula + localDir := filepath.Join(string(d), "nebula") + + req := &pb.DownloadFileRequest{ + SourceBackend: backend, + TargetPath: localDir, + Recursively: true, + } - cmd := r.backend.RestoreMetaPreCommand(orgpath, bakpath) - ipAddr := strings.Split(node.Addrs, ":")[0] - remoteCommandConcurrentRunner(g, ipAddr, node.User, cmd, r.log) - bak_resources_urls = append(bak_resources_urls, "META_"+ipAddr+":"+bakpath) + _, err = agent.DownloadFile(req) + if err != nil { + return nil, fmt.Errorf("download from %s to %s:%s failed: %w", + externalUri, localDir, utils.StringifyAddr(s.GetAddr()), err) + } + logger.WithField("external", externalUri). 
+ WithField("local", localDir).Info("Download storage data successfully") } - } - err := g.Wait() - if err != nil { - return []string{}, err - } - return bak_resources_urls, nil -} -func (r *Restore) stopCluster() error { - g, _ := errgroup.WithContext(context.Background()) - for _, node := range r.storageNodes { - cmd := "cd " + node.RootDir + " && scripts/nebula.service stop storaged" - ipAddr := strings.Split(node.Addrs, ":")[0] - - func(addr string, user string, cmd string, log *zap.Logger) { - g.Go(func() error { - client, err := remote.NewClient(addr, user, log) - if err != nil { - return err - } - defer client.Close() - return client.ExecCommandBySSH(cmd) - }) - }(ipAddr, node.User, cmd, r.log) - } - - for _, node := range r.metaNodes { - cmd := "cd " + node.RootDir + " && scripts/nebula.service stop metad" - ipAddr := strings.Split(node.Addrs, ":")[0] - func(addr string, user string, cmd string, log *zap.Logger) { - g.Go(func() error { - client, err := remote.NewClient(addr, user, log) - if err != nil { - return err - } - defer client.Close() - return client.ExecCommandBySSH(cmd) - }) - }(ipAddr, node.User, cmd, r.log) - } - - err := g.Wait() - if err != nil { - return err + serviceMap[utils.StringifyAddr(prevList[idx])] = utils.StringifyAddr(s.GetAddr()) } - return nil + return serviceMap, nil } func (r *Restore) startMetaService() error { - g, _ := errgroup.WithContext(context.Background()) - for _, node := range r.metaNodes { - cmd := "cd " + node.RootDir + " && scripts/nebula.service start metad &>/dev/null &" - ipAddr := strings.Split(node.Addrs, ":")[0] - func(addr string, user string, cmd string, log *zap.Logger) { - g.Go(func() error { - client, err := remote.NewClient(addr, user, log) - if err != nil { - return err - } - defer client.Close() - return client.ExecCommandBySSH(cmd) - }) - }(ipAddr, node.User, cmd, r.log) - } - - err := g.Wait() - if err != nil { - return err - } + for _, meta := range r.hosts.GetMetas() { + agent, err := 
r.agentMgr.GetAgentFor(meta.GetAddr()) + if err != nil { + return fmt.Errorf("get agent for metad %s failed: %w", + utils.StringifyAddr(meta.GetAddr()), err) + } - return nil -} + req := &pb.StartServiceRequest{ + Role: pb.ServiceRole_META, + Dir: string(meta.Dir.Root), + } -func (r *Restore) startStorageService() error { - g, _ := errgroup.WithContext(context.Background()) - for _, node := range r.storageNodes { - cmd := "cd " + node.RootDir + " && scripts/nebula.service start storaged &>/dev/null &" - ipAddr := strings.Split(node.Addrs, ":")[0] - func(addr string, user string, cmd string, log *zap.Logger) { - g.Go(func() error { - client, err := remote.NewClient(addr, user, log) - if err != nil { - return err - } - defer client.Close() - return client.ExecCommandBySSH(cmd) - }) - }(ipAddr, node.User, cmd, r.log) - } - - err := g.Wait() - if err != nil { - return err + _, err = agent.StartService(req) + if err != nil { + return fmt.Errorf("start meta service %s by agent failed: %w", + utils.StringifyAddr(meta.GetAddr()), err) + } + log.WithField("addr", utils.StringifyAddr(meta.GetAddr())). 
+ Info("Start meta service successfully") } return nil } -func (r *Restore) listCluster() (*meta.ListClusterInfoResp, error) { - r.metaLeader = r.config.Meta - - for { - client := metaclient.NewMetaClient(r.log) - err := client.Open(r.metaLeader) +func (r *Restore) stopCluster() error { + rootDirs := r.hosts.GetRootDirs() + for _, agentAddr := range r.hosts.GetAgents() { + agent, err := r.agentMgr.GetAgent(agentAddr) if err != nil { - return nil, err + return fmt.Errorf("get agent %s failed: %w", utils.StringifyAddr(agentAddr), err) } - listReq := meta.NewListClusterInfoReq() - defer client.Close() - - resp, err := client.ListCluster(listReq) - if err != nil { - return nil, err + dirs, ok := rootDirs[agentAddr.Host] + if !ok { + log.WithField("host", agentAddr.Host).Info("Does not find nebula root dirs in this host") + continue } - if resp.GetCode() != nebula.ErrorCode_E_LEADER_CHANGED && resp.GetCode() != nebula.ErrorCode_SUCCEEDED { - r.log.Error("list cluster failed", zap.String("error code", resp.GetCode().String())) - return nil, listClusterFailed + logger := log.WithField("host", agentAddr.Host) + for _, d := range dirs { + req := &pb.StopServiceRequest{ + Role: pb.ServiceRole_ALL, + Dir: d.Dir, + } + logger.WithField("dir", d.Dir).Info("Stop services") + _, err := agent.StopService(req) + if err != nil { + return fmt.Errorf("stop services in host %s failed: %w", agentAddr.Host, err) + } } + } + return nil +} - if resp.GetCode() == nebula.ErrorCode_SUCCEEDED { - return resp, nil +func (r *Restore) restoreMeta(backup *meta.BackupMeta, storageMap map[string]string) error { + addrMap := make([]*meta.HostPair, 0, len(storageMap)) + for from, to := range storageMap { + fromaddr, err := utils.ParseAddr(from) + if err != nil { + return fmt.Errorf("parse %s failed: %w", from, err) } - - leader := resp.GetLeader() - if leader == meta.ExecResp_Leader_DEFAULT { - return nil, LeaderNotFoundError + toaddr, err := utils.ParseAddr(to) + if err != nil { + return 
fmt.Errorf("parse %s failed: %w", to, err) } - r.log.Info("leader changed", zap.String("leader", leader.String())) - r.metaLeader = metaclient.HostaddrToString(leader) + addrMap = append(addrMap, &meta.HostPair{FromHost: fromaddr, ToHost: toaddr}) } -} - -func (r *Restore) getMetaInfo(hosts []*nebula.HostAddr) ([]config.NodeInfo, error) { - var info []config.NodeInfo - - if len(hosts) == 0 { - r.log.Warn("meta host list is nil") - return nil, listClusterFailed - } + for _, meta := range r.hosts.GetMetas() { + metaSsts := make([]string, 0, len(backup.GetMetaFiles())) + for _, f := range backup.GetMetaFiles() { + // TODO(spw): data folder end with '/'? + sstPath := fmt.Sprintf("%s/%s", string(meta.Dir.Data[0]), string(f)) + metaSsts = append(metaSsts, sstPath) + } - for _, v := range hosts { - client := metaclient.NewMetaClient(r.log) - addr := metaclient.HostaddrToString(v) - r.log.Info("will get meta info", zap.String("addr", addr)) - err := client.Open(addr) + err := r.meta.RestoreMeta(meta.GetAddr(), addrMap, metaSsts) if err != nil { - return nil, err + return fmt.Errorf("restore meta service %s failed: %w", + utils.StringifyAddr(meta.GetAddr()), err) } - dirReq := meta.NewGetMetaDirInfoReq() - defer client.Close() + log.WithField("addr", utils.StringifyAddr(meta.GetAddr())). 
+ Info("restore backup in this metad successfully") + } + + return nil +} - resp, err := client.ListMetaDir(dirReq) +func (r *Restore) startStorageService() error { + for _, s := range r.hosts.GetStorages() { + agent, err := r.agentMgr.GetAgentFor(s.GetAddr()) if err != nil { - return nil, err + return fmt.Errorf("get agent for storaged %s failed: %w", + utils.StringifyAddr(s.GetAddr()), err) } - if resp.GetCode() != nebula.ErrorCode_SUCCEEDED { - r.log.Error("list cluster failed", zap.String("error code", resp.GetCode().String())) - return nil, listClusterFailed + req := &pb.StartServiceRequest{ + Role: pb.ServiceRole_STORAGE, + Dir: string(s.GetDir().GetRoot()), } - var datadir []string - - for _, d := range resp.Dir.Data { - datadir = append(datadir, string(d[0:])) + _, err = agent.StartService(req) + if err != nil { + return fmt.Errorf("start storaged by agent failed: %w", err) } - info = append(info, config.NodeInfo{Addrs: metaclient.HostaddrToString(v), - User: r.config.User, RootDir: string(resp.Dir.Root[0:]), DataDir: datadir}) + log.WithField("addr", utils.StringifyAddr(s.GetAddr())). 
+ Info("Start storaged by agent successfully") } - return info, nil -} -func (r *Restore) setStorageInfo(resp *meta.ListClusterInfoResp) { - for _, v := range resp.StorageServers { - var datadir []string + return nil +} - for _, d := range v.Dir.Data { - datadir = append(datadir, string(d[0:])) +func (r *Restore) startGraphService() error { + for _, s := range r.hosts.GetGraphs() { + agent, err := r.agentMgr.GetAgentFor(s.GetAddr()) + if err != nil { + return fmt.Errorf("get agent for graphd %s failed: %w", + utils.StringifyAddr(s.GetAddr()), err) } - r.storageNodes = append(r.storageNodes, config.NodeInfo{Addrs: metaclient.HostaddrToString(v.Host), - User: r.config.User, RootDir: string(v.Dir.Root[0:]), DataDir: datadir}) + req := &pb.StartServiceRequest{ + Role: pb.ServiceRole_GRAPH, + Dir: string(s.GetDir().GetRoot()), + } + _, err = agent.StartService(req) + if err != nil { + return fmt.Errorf("start graphd by agent failed: %w", err) + } + log.WithField("addr", utils.StringifyAddr(s.GetAddr())). 
+ Info("Start graphd by agent successfully") } - sort.SliceStable(r.storageNodes, func(i, j int) bool { - return r.storageNodes[i].Addrs < r.storageNodes[j].Addrs - }) + return nil } -func (r *Restore) checkSpace(m *meta.BackupMeta) error { - var client *metaclient.MetaClient - reCreate := true - - for gid, info := range m.GetBackupInfo() { - for { - if reCreate { - client = metaclient.NewMetaClient(r.log) - err := client.Open(r.metaLeader) - if err != nil { - return err - } - } - spaceReq := meta.NewGetSpaceReq() - defer client.Close() - spaceReq.SpaceName = info.Space.SpaceName - - resp, err := client.GetSpaceInfo(spaceReq) - if err != nil { - return err - } - - if resp.GetCode() == nebula.ErrorCode_E_SPACE_NOT_FOUND { - reCreate = false - break - } - - if resp.GetCode() != nebula.ErrorCode_E_LEADER_CHANGED && resp.GetCode() != nebula.ErrorCode_SUCCEEDED { - r.log.Error("get space failed", zap.String("error code", resp.GetCode().String())) - return getSpaceFailed - } - - if resp.GetCode() == nebula.ErrorCode_SUCCEEDED { - if resp.Item.SpaceID != gid { - r.log.Error("space not matching", zap.String("spacename", string(info.Space.SpaceName[0:])), - zap.Int32("gid", int32(gid)), zap.Int32("gid in server", int32(resp.Item.SpaceID))) - return spaceNotMatching - } - reCreate = false - break - } - - leader := resp.GetLeader() - if leader == meta.ExecResp_Leader_DEFAULT { - return LeaderNotFoundError - } +func (r *Restore) cleanupOriginalData() error { + for _, m := range r.hosts.GetMetas() { + agent, err := r.agentMgr.GetAgentFor(m.GetAddr()) + if err != nil { + return fmt.Errorf("get agent for metad %s failed: %w", + utils.StringifyAddr(m.GetAddr()), err) + } - r.log.Info("leader changed", zap.String("leader", leader.String())) - r.metaLeader = metaclient.HostaddrToString(leader) - reCreate = true + req := &pb.RemoveDirRequest{ + Path: fmt.Sprintf("%s/nebula%s", string(m.Dir.Data[0]), r.backSuffix), + } + _, err = agent.RemoveDir(req) + if err != nil { + return 
fmt.Errorf("remove meta data dir %s by agent failed: %w", req.Path, err) } + log.WithField("addr", utils.StringifyAddr(m.GetAddr())). + WithField("path", req.Path).Info("Remove meta origin data successfully.") } - return nil -} -func (r *Restore) dropSpace(m *meta.BackupMeta) error { - - var client *metaclient.MetaClient - reCreate := true + for _, s := range r.hosts.GetStorages() { + agent, err := r.agentMgr.GetAgentFor(s.GetAddr()) + if err != nil { + return fmt.Errorf("get agent for storaged %s failed: %w", + utils.StringifyAddr(s.GetAddr()), err) + } - for _, info := range m.GetBackupInfo() { - for { - if reCreate { - client = metaclient.NewMetaClient(r.log) - err := client.Open(r.metaLeader) - if err != nil { - return err - } + logger := log.WithField("addr", utils.StringifyAddr(s.GetAddr())) + for _, dir := range s.Dir.Data { + req := &pb.RemoveDirRequest{ + Path: fmt.Sprintf("%s/nebula%s", string(dir), r.backSuffix), } - - dropReq := meta.NewDropSpaceReq() - defer client.Close() - dropReq.SpaceName = info.Space.SpaceName - dropReq.IfExists = true - - resp, err := client.DropSpace(dropReq) + _, err = agent.RemoveDir(req) if err != nil { - return err - } - - if resp.GetCode() != nebula.ErrorCode_E_LEADER_CHANGED && resp.GetCode() != nebula.ErrorCode_SUCCEEDED { - r.log.Error("drop space failed", zap.String("error code", resp.GetCode().String())) - return dropSpaceFailed - } - - if resp.GetCode() == nebula.ErrorCode_SUCCEEDED { - reCreate = false - break - } - - leader := resp.GetLeader() - if leader == meta.ExecResp_Leader_DEFAULT { - return LeaderNotFoundError + return fmt.Errorf("remove storage data dir %s by agent failed: %w", req.Path, err) } - - r.log.Info("leader changed", zap.String("leader", leader.String())) - r.metaLeader = metaclient.HostaddrToString(leader) - reCreate = true + logger.WithField("path", req.Path).Info("Remove storage origin data successfully.") } } return nil } -func (r *Restore) RestoreCluster() error { - - resp, err := 
r.listCluster() +// backup_root/backup_name +// - meta +// - xxx.sst +// - ... +// - data +// - backup_name.meta +func (r *Restore) Restore() error { + logger := log.WithField("backup", r.cfg.BackupName) + // check backup dir existence + rootUri, err := utils.UriJoin(r.cfg.Backend.Uri(), r.cfg.BackupName) if err != nil { - r.log.Error("list cluster info failed", zap.Error(err)) return err } - r.log.Info("listcluster result", zap.String("listcluster", metaclient.ListClusterInfoRespToString(resp))) - - r.setStorageInfo(resp) - - metaInfo, err := r.getMetaInfo(resp.GetMetaServers()) - if err != nil { - return err + exist := r.sto.ExistDir(r.ctx, rootUri) + if !exist { + return fmt.Errorf("backup dir %s does not exist", rootUri) } - r.metaNodes = metaInfo - - // show target cluster map - // r.storageNodes - // r.metaNodes + logger.WithField("uri", rootUri).Info("Check backup dir successfully") - for _, m := range metaInfo { - r.log.Info("meta node", zap.String("node addr", m.Addrs)) - } - - err = r.check() - - if err != nil { - r.log.Error("restore check failed", zap.Error(err)) + // download and parse backup meta file + if err := utils.EnsureDir(utils.LocalTmpDir); err != nil { return err } + defer func() { + if err := utils.RemoveDir(utils.LocalTmpDir); err != nil { + log.WithError(err).Errorf("Remove tmp dir %s failed", utils.LocalTmpDir) + } + }() - err = r.downloadMetaFile() + backupMetaName := fmt.Sprintf("%s.meta", r.cfg.BackupName) + metaUri, _ := utils.UriJoin(rootUri, backupMetaName) + tmpLocalPath := filepath.Join(utils.LocalTmpDir, backupMetaName) + err = r.sto.Download(r.ctx, tmpLocalPath, metaUri, false) if err != nil { - r.log.Error("download meta file failed", zap.Error(err)) - return err + return fmt.Errorf("download %s to %s failed: %w", metaUri, tmpLocalPath, err) } - - m, err := r.restoreMetaFile() - + bakMeta, err := utils.ParseMetaFromFile(tmpLocalPath) if err != nil { - r.log.Error("restore meta file failed", zap.Error(err)) - return err + return 
fmt.Errorf("parse backup meta file %s failed: %w", tmpLocalPath, err) } - err = r.checkPhysicalTopology(m.BackupInfo) + // check this cluster's topology with info kept in backup meta + err = r.checkPhysicalTopology(bakMeta.GetSpaceBackups()) if err != nil { - r.log.Error("check physical failed", zap.Error(err)) - return err + return fmt.Errorf("physical topology not consistent: %w", err) } - r.backSuffix = storage.GetBackupDirSuffix() - r.log.Info("restore target's backup suffix", zap.String("suffix", r.backSuffix)) - if !m.IncludeSystemSpace { - err = r.checkSpace(m) + // if only resotre some spaces, check and remove these spaces + if !bakMeta.AllSpaces { + err = r.checkAndDropSpaces(bakMeta.SpaceBackups) if err != nil { - r.log.Error("check space failed", zap.Error(err)) - return err - } - err = r.dropSpace(m) - if err != nil { - r.log.Error("drop space faile", zap.Error(err)) - return err + return fmt.Errorf("check and drop space failed: %w", err) } + log.Info("Check and drop spaces successfully") } + // stop cluster err = r.stopCluster() if err != nil { - r.log.Error("stop cluster failed", zap.Error(err)) - return err + return fmt.Errorf("stop cluster failed: %w", err) } + logger.Info("Stop cluster successfully") - if m.IncludeSystemSpace { - var bakUrls []string - bakUrls, err = r.backupOriginal() - if err != nil { - r.log.Error("backup original failed", zap.Error(err)) - return err - } - r.log.Info("success backup original data", zap.Strings("backup data urls", bakUrls)) + // backup original data if we are to restore whole cluster + err = r.backupOriginal(bakMeta.AllSpaces) + if err != nil { + return fmt.Errorf("backup origin data path failed: %w", err) } + logger.Info("Backup origin cluster data successfully") - g, _ := errgroup.WithContext(context.Background()) - - var files []string - - for _, f := range m.MetaFiles { - files = append(files, string(f[:])) + // download backup data from external storage to cluster + err = r.downloadMeta() + if err != nil { 
+ return fmt.Errorf("download meta data to cluster failed: %w", err) } - - sstFiles := r.downloadMeta(g, files) - storageIDMap := r.downloadStorage(g, m.BackupInfo) - - err = g.Wait() + log.Info("Download meta data to cluster successfully.") + storageMap, err := r.downloadStorage(bakMeta) if err != nil { - r.log.Error("restore error") - return err + return fmt.Errorf("download storage data to cluster failed: %w", err) } + log.Info("Download storage data to cluster successfully.") + // start meta service first err = r.startMetaService() if err != nil { - r.log.Error("start cluster failed", zap.Error(err)) - return err + return fmt.Errorf("start meta service failed: %w", err) } - time.Sleep(time.Second * 3) + log.Info("Start meta service successfully.") - err = r.restoreMeta(sstFiles, storageIDMap) + // restore meta service by map + err = r.restoreMeta(bakMeta, storageMap) if err != nil { - r.log.Error("restore meta file failed", zap.Error(err)) - return err + return fmt.Errorf("restore cluster meta failed: %w", err) } + log.Info("Restore meta service successfully.") + // strat storage and graph service err = r.startStorageService() if err != nil { - r.log.Error("start storage service failed", zap.Error(err)) - return err + return fmt.Errorf("start storage service failed: %w", err) } - - r.log.Info("restore meta successed") + err = r.startGraphService() + if err != nil { + return fmt.Errorf("start graph service failed: %w", err) + } + log.Info("Start storage and graph services successfully") // after success restore, cleanup the backup data if needed - if m.IncludeSystemSpace { - err = r.cleanupOriginalBackup() - if err != nil { - r.log.Warn("cleanup backup data failed", zap.Error(err)) - } + err = r.cleanupOriginalData() + if err != nil { + return fmt.Errorf("clean up origin data failed: %w", err) } + log.Info("Cleanup origin data successfully") return nil } diff --git a/pkg/show/show.go b/pkg/show/show.go index 25587d2..8f47181 100644 --- a/pkg/show/show.go +++ 
b/pkg/show/show.go @@ -1,122 +1,165 @@ package show import ( + "context" + "fmt" "os" - "os/exec" + "path/filepath" "strconv" + "strings" "time" _ "github.com/facebook/fbthrift/thrift/lib/go/thrift" "github.com/olekukonko/tablewriter" - "github.com/vesoft-inc/nebula-br/pkg/storage" + log "github.com/sirupsen/logrus" + "github.com/vesoft-inc/nebula-agent/pkg/storage" + "github.com/vesoft-inc/nebula-br/pkg/config" "github.com/vesoft-inc/nebula-br/pkg/utils" _ "github.com/vesoft-inc/nebula-go/v2/nebula/meta" - "go.uber.org/zap" ) type Show struct { - backend storage.ExternalStorage - backupFiles []string - log *zap.Logger -} + ctx context.Context + sto storage.ExternalStorage + cfg *config.ShowConfig -type showInfo struct { - BackupName string `json:"name"` - CreateTime string `json:"create_time"` - Spaces []string `json:"spaces"` - Full bool `json:"full"` - IncludeSystemSpace bool `json:"specify_space"` + backupNames []string } -var tableHeader []string = []string{"name", "create_time", "spaces", "full_backup", "specify_space"} +type backupInfo struct { + BackupName string `json:"name"` + CreateTime string `json:"create_time"` + Spaces []string `json:"spaces"` + Full bool `json:"full"` + AllSpaces bool `json:"all_spaces"` +} -func NewShow(backendUrl string, log *zap.Logger) *Show { - backend, err := storage.NewExternalStorage(backendUrl, log, 5, "", nil) - if err != nil { - log.Error("new external storage failed", zap.Error(err)) - return nil +func (b *backupInfo) StringTable() []string { + broken_info := []string{"", "backup is broken", "N/A", "N/A", "N/A"} + if b == nil { + return broken_info } - return &Show{log: log, backend: backend} + + var table []string + table = append(table, string(b.BackupName)) + table = append(table, b.CreateTime) + table = append(table, strings.Join(b.Spaces, ",")) + table = append(table, strconv.FormatBool(b.Full)) + table = append(table, strconv.FormatBool(b.AllSpaces)) + return table } -func (r *Show) readMetaFile(metaName string) 
([]string, error) { - filename := "/tmp/" + metaName - m, err := utils.GetMetaFromFile(r.log, filename) - if m == nil { - r.log.Error("failed to get meta", zap.String("file", filename), - zap.Error(err)) - return nil, err +var tableHeader []string = []string{"name", "create_time", "spaces", "full_backup", "all_spaces"} + +func NewShow(ctx context.Context, cfg *config.ShowConfig) (*Show, error) { + s, err := storage.New(cfg.Backend) + if err != nil { + return nil, fmt.Errorf("create external storage failed: %w", err) } - var spaces string - for _, s := range m.BackupInfo { - if len(spaces) != 0 { - spaces += "," - } - spaces += string(s.Space.SpaceName) + dirNames, err := s.ListDir(ctx, cfg.Backend.Uri()) + if err != nil { + return nil, fmt.Errorf("list dir failed: %w", err) } + log.WithField("prefix", cfg.Backend.Uri()).WithField("backup names", dirNames).Debug("List backups") + + return &Show{ + ctx: ctx, + sto: s, + cfg: cfg, + backupNames: dirNames, + }, nil +} - var info []string - info = append(info, string(m.BackupName)) - info = append(info, time.Unix(0, m.CreateTime*int64(time.Millisecond)).Format("2006-01-02 15:04:05")) - info = append(info, spaces) - info = append(info, strconv.FormatBool(m.Full)) +func (s *Show) downloadMetaFiles() ([]string, error) { + metaFiles := make([]string, 0) + for _, bname := range s.backupNames { + if !utils.IsBackupName(bname) { + log.Infof("%s is not backup name", bname) + continue + } + metaName := bname + ".meta" + localTmpPath := filepath.Join(utils.LocalTmpDir, metaName) + externalUri, _ := utils.UriJoin(s.cfg.Backend.Uri(), bname, metaName) + err := s.sto.Download(s.ctx, localTmpPath, externalUri, false) + if err != nil { + return nil, fmt.Errorf("download %s to %s failed: %w", externalUri, localTmpPath, err) + } + log.WithField("external", externalUri).WithField("local", localTmpPath).Debug("Download backup meta file successfully.") - info = append(info, strconv.FormatBool(m.IncludeSystemSpace)) + metaFiles = 
append(metaFiles, localTmpPath) + } - return info, nil + return metaFiles, nil } -func (s *Show) showMetaFiles() ([][]string, error) { - var asciiTable [][]string - broken_info := []string{"", "backup is broken", "N/A", "N/A", "N/A"} - - for _, d := range s.backupFiles { - metaFileName := d + ".meta" - metaFile := d + "/" + metaFileName - cmdStr := s.backend.RestoreMetaFileCommand(metaFile, "/tmp/") - s.log.Info("download metafile", zap.Strings("cmd", cmdStr)) - cmd := exec.Command(cmdStr[0], cmdStr[1:]...) - err := cmd.Run() - if err != nil { - s.log.Error("cmd run failed", zap.Strings("run cmd", cmdStr), zap.Error(err)) - broken_info[0] = d - broken_info[1] = broken_info[1] + ": backup meta file dowload error" // broken reason - asciiTable = append(asciiTable, broken_info) +func (s *Show) parseMetaFiles(metaPathList []string) ([]*backupInfo, error) { + var infoList []*backupInfo + for _, path := range metaPathList { + log.WithField("meta path", path).Debug("Start parse meta file") + m, err := utils.ParseMetaFromFile(path) + if m == nil { + log.WithError(err).WithField("meta path", path).Error("parse meta file failed") + infoList = append(infoList, nil) continue } - cmd.Wait() - info, err := s.readMetaFile(metaFileName) - if err != nil { - s.log.Error("parse meta file failed", zap.Error(err)) - broken_info[0] = d - broken_info[1] = broken_info[1] + ": meta file parse error, " + err.Error() // broken reason - continue + + spaces := make([]string, 0) + for _, b := range m.GetSpaceBackups() { + spaces = append(spaces, string(b.Space.SpaceName)) + } + + info := &backupInfo{ + BackupName: string(m.BackupName), + CreateTime: time.Unix(0, m.CreateTime*int64(time.Millisecond)).Format("2006-01-02 15:04:05"), + Spaces: spaces, + Full: m.Full, + AllSpaces: m.AllSpaces, } - asciiTable = append(asciiTable, info) + + infoList = append(infoList, info) } - return asciiTable, nil + return infoList, nil } -func (s *Show) ShowInfo() error { - dirs, err := 
s.backend.ListBackupCommand() - if err != nil { - s.log.Error("list backup file failed", zap.Error(err)) +func (s *Show) showBackupInfo(infoList []*backupInfo) { + asciiTable := make([][]string, 0) + for _, info := range infoList { + asciiTable = append(asciiTable, info.StringTable()) + } + + tablewriter := tablewriter.NewWriter(os.Stdout) + tablewriter.SetHeader(tableHeader) + tablewriter.AppendBulk(asciiTable) + tablewriter.Render() +} + +func (s *Show) Show() error { + logger := log.WithField("root", s.cfg.Backend.Uri()) + + if err := utils.EnsureDir(utils.LocalTmpDir); err != nil { return err } + defer func() { + if err := utils.RemoveDir(utils.LocalTmpDir); err != nil { + log.WithError(err).Errorf("Remove tmp dir %s failed", utils.LocalTmpDir) + } + }() - s.backupFiles = dirs - s.log.Info("list backup command return", zap.Strings("backup names", s.backupFiles)) + logger.Debug("Start download backup meta files.") + files, err := s.downloadMetaFiles() + if err != nil { + return err + } - table, err := s.showMetaFiles() + logger.Debug("Start parse backup meta files.") + infoList, err := s.parseMetaFiles(files) if err != nil { return err } - tablewriter := tablewriter.NewWriter(os.Stdout) - tablewriter.SetHeader(tableHeader) - tablewriter.AppendBulk(table) - tablewriter.Render() + logger.Debug("Start show meta info.") + s.showBackupInfo(infoList) return nil } diff --git a/pkg/storage/cmds.go b/pkg/storage/cmds.go deleted file mode 100644 index 733b861..0000000 --- a/pkg/storage/cmds.go +++ /dev/null @@ -1,57 +0,0 @@ -package storage - -import ( - "fmt" - "regexp" - "time" -) - -func GetBackupDirSuffix() string { - return fmt.Sprintf("_old_%d", time.Now().Unix()) -} - -func getBackDir(origin string) string { - return origin + GetBackupDirSuffix() -} - -func mvDirCommand(from string, to string) string { - if from != "" && to != "" { - return fmt.Sprintf("mv %s %s", from, to) - } - return "" -} - -var invalidDstRegex = `(^/+$)|(\s+)` -var allowedDstRegex = 
`\S+_old_[0-9]+$` - -func sanityCheckForRM(dst string) bool { - invalid, _ := regexp.Compile(invalidDstRegex) - if invalid.MatchString(dst) { - return false - } - allowed, _ := regexp.Compile(allowedDstRegex) - if !allowed.MatchString(dst) { - return false - } - return true -} - -func rmDirCommand(dst string) string { - if dst != "" && sanityCheckForRM(dst) { - return fmt.Sprintf("rm -r %s 2>/dev/null", dst) - } - return "" -} - -func mkDirCommand(dst string) string { - if dst != "" { - return fmt.Sprintf("mkdir -p %s", dst) - } - return "" -} - -func mvAndMkDirCommand(srcDir string, bkDir string) string { - mvCmd := mvDirCommand(srcDir, bkDir) - mkCmd := mkDirCommand(srcDir) - return fmt.Sprintf("%s && %s", mvCmd, mkCmd) -} diff --git a/pkg/storage/flags.go b/pkg/storage/flags.go new file mode 100644 index 0000000..e3a4868 --- /dev/null +++ b/pkg/storage/flags.go @@ -0,0 +1,88 @@ +package storage + +import ( + "fmt" + "strings" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + pb "github.com/vesoft-inc/nebula-agent/pkg/proto" +) + +const ( + flagStorage = "storage" + + flagS3Endpoint = "s3.endpoint" + flagS3Region = "s3.region" + flagS3AccessKey = "s3.access_key" + flagS3SecretKey = "s3.secret_key" +) + +func AddFlags(flags *pflag.FlagSet) { + flags.String(flagStorage, "", + `backup target url, format: ://. + : a string indicating which backend type. optional: local, hdfs. + now hdfs and local is supported, s3 and oss are still experimental. 
+ example: + for local - "local:///the/local/path/to/backup" + for s3 - "s3://example/url/to/the/backup" + `) + cobra.MarkFlagRequired(flags, flagStorage) + AddS3Flags(flags) + AddLocalFlags(flags) +} + +func AddS3Flags(flags *pflag.FlagSet) { + flags.String(flagS3Region, "", "S3 Option: set region or location to upload or download backup") + flags.String(flagS3Endpoint, "", + "S3 Option: set the S3 endpoint URL, please specify the http or https scheme explicitly") + flags.String(flagS3AccessKey, "", "S3 Option: set access key id") + flags.String(flagS3SecretKey, "", "S3 Option: set secret key for access id") +} + +func AddLocalFlags(flags *pflag.FlagSet) { + // There is no need extra flags for local storage other than local uri +} + +func ParseFromFlags(flags *pflag.FlagSet) (*pb.Backend, error) { + s, err := flags.GetString(flagStorage) + if err != nil { + return nil, err + } + s = strings.TrimRight(s, "/ ") // trim tailing space and / in passed in storage uri + + t := pb.ParseType(s) + b := &pb.Backend{} + switch t { + case pb.LocalType: + b.SetUri(s) + case pb.S3Type: + region, err := flags.GetString(flagS3Region) + if err != nil { + return nil, err + } + endpoint, err := flags.GetString(flagS3Endpoint) + if err != nil { + return nil, err + } + accessKey, err := flags.GetString(flagS3AccessKey) + if err != nil { + return nil, err + } + secretKey, err := flags.GetString(flagS3SecretKey) + if err != nil { + return nil, err + } + b.SetUri(s) + b.GetS3().Region = region + b.GetS3().Endpoint = endpoint + b.GetS3().AccessKey = accessKey + b.GetS3().SecretKey = secretKey + default: + return nil, fmt.Errorf("bad format backend: %d", t) + } + + log.WithField("type", t).WithField("uri", s).Debugln("Parse storage flag") + return b, nil +} diff --git a/pkg/storage/hdfs.go b/pkg/storage/hdfs.go deleted file mode 100644 index 38fc296..0000000 --- a/pkg/storage/hdfs.go +++ /dev/null @@ -1,167 +0,0 @@ -package storage - -import ( - "bufio" - "fmt" - "os/exec" - "strconv" - 
"strings" - - "github.com/vesoft-inc/nebula-br/pkg/context" - "go.uber.org/zap" -) - -type HDFSBackedStore struct { - url string - log *zap.Logger - backupName string - args string - command string -} - -func NewHDFSBackendStore(url string, log *zap.Logger, maxConcurrent int, args string, ctx *context.Context) *HDFSBackedStore { - return &HDFSBackedStore{url: url, log: log, args: args} -} - -func (s *HDFSBackedStore) SetBackupName(name string) { - if s.url[len(s.url)-1] != '/' { - s.url += "/" - } - s.url += name - s.backupName = name -} - -func (s HDFSBackedStore) URI() string { - return s.url -} -func (s HDFSBackedStore) Scheme() string { - return SCHEME_HDFS -} - -func (s HDFSBackedStore) copyCommand(src []string, dir string) string { - cmdFormat := "hadoop fs -mkdir -p " + dir + " && hadoop fs -copyFromLocal %s %s " + dir - files := "" - for _, f := range src { - files += f + " " - } - - return fmt.Sprintf(cmdFormat, s.args, files) -} - -func (s *HDFSBackedStore) BackupPreCommand() []string { - return []string{"hadoop", "fs", "-mkdir", s.url} -} - -func (s HDFSBackedStore) BackupMetaCommand(src []string) string { - metaDir := s.url + "/" + "meta" - return s.copyCommand(src, metaDir) -} - -func (s HDFSBackedStore) BackupMetaDir() string { - return s.url + "/" + "meta" -} - -func (s HDFSBackedStore) BackupStorageCommand(src []string, host string, spaceId string) []string { - var cmd []string - hosts := strings.Split(host, ":") - for i, dir := range src { - storageDir := s.url + "/" + "storage/" + hosts[0] + "/" + hosts[1] + "/" + "data" + strconv.Itoa(i) + "/" + spaceId - data := dir + "/data " - wal := dir + "/wal " - cmdStr := "hadoop fs -mkdir -p " + storageDir + " && hadoop fs -copyFromLocal " + s.args + " " + data + wal + storageDir - cmd = append(cmd, cmdStr) - } - return cmd -} - -func (s HDFSBackedStore) BackupMetaFileCommand(src string) []string { - if len(s.args) == 0 { - return []string{"hadoop", "fs", "-copyFromLocal", src, s.url} - } - args := 
strings.Fields(s.args) - args = append(args, src, s.url) - args = append([]string{"hadoop", "fs", "-copyFromLocal"}, args...) - return args -} - -func (s HDFSBackedStore) RestoreMetaFileCommand(file string, dst string) []string { - if len(s.args) == 0 { - return []string{"hadoop", "fs", "-copyToLocal", "-f", s.url + "/" + file, dst} - } - args := strings.Fields(s.args) - args = append(args, s.url+"/"+file, dst) - args = append([]string{"hadoop", "fs", "-copyToLocal", "-f"}, args...) - return args -} - -func (s HDFSBackedStore) RestoreMetaCommand(src []string, dst string) (string, []string) { - metaDir := s.url + "/" + "meta/" - files := "" - var sstFiles []string - for _, f := range src { - file := metaDir + f - files += file + " " - dstFile := dst + "/" + f - sstFiles = append(sstFiles, dstFile) - } - return fmt.Sprintf("hadoop fs -copyToLocal -f %s %s %s", files, s.args, dst), sstFiles -} - -func (s HDFSBackedStore) RestoreStorageCommand(host string, spaceID []string, dst []string) []string { - hosts := strings.Split(host, ":") - var cmd []string - for i, d := range dst { - storageDir := s.url + "/storage/" + hosts[0] + "/" + hosts[1] + "/" + "data" + strconv.Itoa(i) + "/" - dirs := "" - for _, id := range spaceID { - dirs += storageDir + id + " " - } - cmdStr := fmt.Sprintf("hadoop fs -copyToLocal %s %s %s", dirs, s.args, d) - cmd = append(cmd, cmdStr) - } - - return cmd -} - -func (s HDFSBackedStore) RestoreMetaPreCommand(srcDir string, bkDir string) string { - return mvAndMkDirCommand(srcDir, bkDir) -} - -func (s HDFSBackedStore) RestoreStoragePreCommand(srcDir string, bkDir string) string { - return mvAndMkDirCommand(srcDir, bkDir) -} - -func (s HDFSBackedStore) RestoreMetaPostCommand(bkDir string) string { - return rmDirCommand(bkDir) -} - -func (s HDFSBackedStore) RestoreStoragePostCommand(bkDir string) string { - return rmDirCommand(bkDir) -} - -func (s HDFSBackedStore) CheckCommand() string { - return "hadoop fs -ls " + s.url -} - -func (s 
HDFSBackedStore) ListBackupCommand() ([]string, error) { - output, err := exec.Command("hadoop", "fs", "-ls", "-C", s.url).Output() - if err != nil { - return nil, err - } - - var dirs []string - sc := bufio.NewScanner(strings.NewReader(string(output))) - for sc.Scan() { - line := sc.Text() - if !strings.HasPrefix(line, "hdfs://") { - break - } - index := strings.Index(line, s.url) - if index == -1 { - return nil, fmt.Errorf("Wrong hdfs file name %s", line) - } - dirs = append(dirs, line[len(s.url):]) - } - - return dirs, nil -} diff --git a/pkg/storage/local.go b/pkg/storage/local.go deleted file mode 100644 index 195764f..0000000 --- a/pkg/storage/local.go +++ /dev/null @@ -1,201 +0,0 @@ -package storage - -import ( - "fmt" - "io/ioutil" - "strconv" - "strings" - - "github.com/vesoft-inc/nebula-br/pkg/context" - "go.uber.org/zap" -) - -type LocalBackedStore struct { - dir string - backupName string - log *zap.Logger - args string - ctx *context.Context -} - -func NewLocalBackedStore(dir string, log *zap.Logger, maxConcurrent int, args string, ctx *context.Context) *LocalBackedStore { - return &LocalBackedStore{dir: dir, log: log, args: args, ctx: ctx} -} - -func (s *LocalBackedStore) SetBackupName(name string) { - s.backupName = name - s.dir += "/" + s.backupName -} - -func (s LocalBackedStore) URI() string { - return s.dir -} - -func (s LocalBackedStore) Scheme() string { - return SCHEME_LOCAL -} - -func (s LocalBackedStore) copyCommand(src []string, dir string) string { - cmdFormat := "mkdir -p " + dir + " && cp -rf %s %s " + dir - files := "" - for _, f := range src { - files += f + " " - } - - return fmt.Sprintf(cmdFormat, s.args, files) -} - -func (s LocalBackedStore) remoteCopyCommand(src []string, dstHost string, dstDir string) string { - cmdFormat := "scp -r %s %s " + dstHost + ":" + dstDir - files := "" - for _, f := range src { - files += f + " " - } - - return fmt.Sprintf(cmdFormat, s.args, files) -} - -func (s *LocalBackedStore) BackupPreCommand() 
[]string { - return []string{"mkdir", s.dir} -} - -func (s LocalBackedStore) backupMetaCommandLocalCopy(src []string) string { - metaDir := s.BackupMetaDir() - - desturl := s.ctx.RemoteAddr - desturl += ":" - desturl += metaDir - - s.ctx.Reporter.MetaUploadingReport(s.ctx.RemoteAddr, src, desturl) - - return s.copyCommand(src, metaDir) -} - -func (s LocalBackedStore) backupMetaCommandRemoteCopy(src []string) string { - metaDir := s.BackupMetaDir() - - desturl := s.ctx.LocalAddr - desturl += ":" - desturl += metaDir - - s.ctx.Reporter.MetaUploadingReport(s.ctx.RemoteAddr, src, desturl) - - return s.remoteCopyCommand(src, s.ctx.LocalAddr, metaDir) -} - -func (s LocalBackedStore) BackupMetaDir() string { - return s.dir + "/" + "meta" -} - -func (s LocalBackedStore) BackupMetaCommand(src []string) string { - return s.backupMetaCommandRemoteCopy(src) -} - -func (s LocalBackedStore) BackupStorageCommand(src []string, host string, spaceId string) []string { - var cmd []string - for i, dir := range src { - storageDir := s.dir + "/" + "storage/" + host + "/" + "data" + strconv.Itoa(i) + "/" + spaceId //TODO(ywj): extract a common rule for tgt dir - data := dir + "/data " - wal := dir + "/wal " - - desturl := s.ctx.RemoteAddr - desturl += ":" - desturl += storageDir - - srcdirs := []string{data, wal} - - s.ctx.Reporter.StorageUploadingReport(spaceId, s.ctx.RemoteAddr, srcdirs, desturl) - - cmdStr := "mkdir -p " + storageDir + " && cp -rf " + s.args + " " + data + wal + storageDir - cmd = append(cmd, cmdStr) - } - return cmd -} - -func (s LocalBackedStore) BackupMetaFileCommand(src string) []string { - if len(s.args) == 0 { - return []string{"cp", src, s.dir} - } - args := strings.Fields(s.args) - args = append(args, src, s.dir) - args = append([]string{"cp"}, args...) 
- return args -} - -func (s LocalBackedStore) RestoreMetaFileCommand(file string, dst string) []string { - if len(s.args) == 0 { - return []string{"cp", s.dir + "/" + file, dst} - } - args := strings.Fields(s.args) - args = append(args, s.dir+"/"+file, dst) - args = append([]string{"cp"}, args...) - return args -} - -func (s LocalBackedStore) restoreMetaCommandFromRemote(src []string, dst string) (string, []string) { - metaDir := s.BackupMetaDir() - files := "" - var sstFiles []string - for _, f := range src { - file := metaDir + "/" + f - srcFile := s.ctx.LocalAddr + ":" + file - files += srcFile + " " - - dstFile := dst + "/" + f - sstFiles = append(sstFiles, dstFile) - } - return fmt.Sprintf("scp -r %s %s %s", s.args, files, dst), sstFiles -} - -func (s LocalBackedStore) RestoreMetaCommand(src []string, dst string) (string, []string) { - return s.restoreMetaCommandFromRemote(src, dst) -} - -func (s LocalBackedStore) RestoreStorageCommand(host string, spaceID []string, dst []string) []string { - var cmd []string - for i, d := range dst { - storageDir := s.dir + "/storage/" + host + "/" + "data" + strconv.Itoa(i) + "/" - dirs := "" - for _, id := range spaceID { - dirs += storageDir + id + " " - } - cmdStr := fmt.Sprintf("cp -rf %s %s %s", dirs, s.args, d) - cmd = append(cmd, cmdStr) - } - - return cmd -} - -func (s LocalBackedStore) RestoreMetaPreCommand(srcDir string, bkDir string) string { - // move to a backup path in case of error - return mvAndMkDirCommand(srcDir, bkDir) -} - -func (s LocalBackedStore) RestoreStoragePreCommand(srcDir string, bkDir string) string { - return mvAndMkDirCommand(srcDir, bkDir) -} - -func (s LocalBackedStore) RestoreMetaPostCommand(bkDir string) string { - return rmDirCommand(bkDir) -} - -func (s LocalBackedStore) RestoreStoragePostCommand(bkDir string) string { - return rmDirCommand(bkDir) -} - -func (s LocalBackedStore) CheckCommand() string { - return "ls " + s.dir -} - -func (s LocalBackedStore) ListBackupCommand() ([]string, 
error) { - files, err := ioutil.ReadDir(s.dir) - if err != nil { - return nil, err - } - - var backupFiles []string - for _, f := range files { - backupFiles = append(backupFiles, f.Name()) - } - return backupFiles, nil -} diff --git a/pkg/storage/oss.go b/pkg/storage/oss.go deleted file mode 100644 index 249974f..0000000 --- a/pkg/storage/oss.go +++ /dev/null @@ -1,154 +0,0 @@ -package storage - -import ( - "bufio" - "fmt" - "os/exec" - "path/filepath" - "strconv" - "strings" - - "github.com/vesoft-inc/nebula-br/pkg/context" - "go.uber.org/zap" -) - -type OSSBackedStore struct { - url string - log *zap.Logger - backupName string - maxConcurrent string - args string -} - -func NewOSSBackendStore(url string, log *zap.Logger, maxConcurrent int, args string, ctx *context.Context) *OSSBackedStore { - if !strings.HasSuffix(url, "/") { - newUrl := url + "/" - log.Warn("original oss url not end with '/'", zap.String("origin-url", url), zap.String("new-url", newUrl)) - url = newUrl - } - return &OSSBackedStore{url: url, log: log, maxConcurrent: strconv.Itoa(maxConcurrent), args: args} -} - -func (s *OSSBackedStore) SetBackupName(name string) { - s.backupName = name - if s.url[len(s.url)-1] != '/' { - s.url += "/" - } - s.url += name -} - -func (s *OSSBackedStore) BackupPreCommand() []string { - return nil -} - -func (s *OSSBackedStore) BackupStorageCommand(src []string, host string, spaceID string) []string { - var cmd []string - for i, dir := range src { - storageDir := s.url + "/" + "storage/" + host + "/" + "data" + strconv.Itoa(i) + "/" + spaceID + "/" - cmdStr := "ossutil cp -r " + dir + " " + storageDir + " " + s.args + " -j " + s.maxConcurrent - cmd = append(cmd, cmdStr) - } - return cmd -} - -func (s OSSBackedStore) BackupMetaCommand(src []string) string { - metaDir := s.url + "/" + "meta/" - return "ossutil cp -r " + filepath.Dir(src[0]) + " " + metaDir + " " + s.args + " -j " + s.maxConcurrent -} - -func (s OSSBackedStore) BackupMetaDir() string { - return s.url 
+ "/" + "meta" -} -func (s OSSBackedStore) BackupMetaFileCommand(src string) []string { - if len(s.args) == 0 { - return []string{"ossutil", "cp", "-r", src, s.url + "/", "-j", s.maxConcurrent} - - } - args := strings.Fields(s.args) - args = append(args, "-r", src, s.url+"/", "-j", s.maxConcurrent) - args = append([]string{"ossutil", "cp"}, args...) - return args -} - -func (s OSSBackedStore) RestoreMetaFileCommand(file string, dst string) []string { - if len(s.args) == 0 { - return []string{"ossutil", "cp", "-r", s.url + "/" + file, dst, "-j", s.maxConcurrent} - } - args := strings.Fields(s.args) - args = append(args, "-r", s.url+"/"+file, dst, "-j", s.maxConcurrent) - args = append([]string{"ossutil", "cp"}, args...) - return args -} - -func (s OSSBackedStore) RestoreMetaCommand(src []string, dst string) (string, []string) { - metaDir := s.url + "/" + "meta/" - var sstFiles []string - for _, f := range src { - file := dst + "/" + f - sstFiles = append(sstFiles, file) - } - return fmt.Sprintf("ossutil cp -r %s %s -j %s %s", metaDir, dst, s.maxConcurrent, s.args), sstFiles -} -func (s OSSBackedStore) RestoreStorageCommand(host string, spaceID []string, dst []string) []string { - var cmd []string - for i, d := range dst { - storageDir := s.url + "/storage/" + host + "/" + "data" + strconv.Itoa(i) + "/" - cmdStr := fmt.Sprintf("ossutil cp -r %s %s -j %s %s", storageDir, d, s.maxConcurrent, s.args) - cmd = append(cmd, cmdStr) - } - - return cmd -} - -func (s OSSBackedStore) RestoreMetaPreCommand(srcDir string, bkDir string) string { - return mvAndMkDirCommand(srcDir, bkDir) -} - -func (s OSSBackedStore) RestoreStoragePreCommand(srcDir string, bkDir string) string { - return mvAndMkDirCommand(srcDir, bkDir) -} - -func (s OSSBackedStore) RestoreMetaPostCommand(bkDir string) string { - return rmDirCommand(bkDir) -} - -func (s OSSBackedStore) RestoreStoragePostCommand(bkDir string) string { - return rmDirCommand(bkDir) -} - -func (s OSSBackedStore) URI() string { - return 
s.url -} - -func (s OSSBackedStore) Scheme() string { - return SCHEME_OSS -} - -func (s OSSBackedStore) CheckCommand() string { - return "ossutil ls " + s.url -} - -func (s OSSBackedStore) ListBackupCommand() ([]string, error) { - output, err := exec.Command("ossutil", "ls", "-d", s.url).Output() - if err != nil { - return nil, err - } - - var dirs []string - sc := bufio.NewScanner(strings.NewReader(string(output))) - for sc.Scan() { - line := sc.Text() - if !strings.HasPrefix(line, "oss://") { - break - } - index := strings.Index(line, s.url) - if index == -1 { - return nil, fmt.Errorf("Wrong oss file name %s", line) - } - dentry := strings.TrimRight(line[len(s.url):], "/") - if dentry != "" { - dirs = append(dirs, dentry) - } - } - return dirs, nil -} diff --git a/pkg/storage/s3.go b/pkg/storage/s3.go deleted file mode 100644 index 435b013..0000000 --- a/pkg/storage/s3.go +++ /dev/null @@ -1,139 +0,0 @@ -package storage - -import ( - "bufio" - "fmt" - "os/exec" - "path/filepath" - "strconv" - "strings" - - "github.com/vesoft-inc/nebula-br/pkg/context" - "go.uber.org/zap" -) - -type S3BackedStore struct { - url string - log *zap.Logger - backupName string - args string - command string -} - -func NewS3BackendStore(url string, log *zap.Logger, maxConcurrent int, args string, ctx *context.Context) *S3BackedStore { - return &S3BackedStore{url: url, log: log, args: args} -} - -func (s *S3BackedStore) SetBackupName(name string) { - s.backupName = name - if s.url[len(s.url)-1] != '/' { - s.url += "/" - } - s.url += name -} - -func (s *S3BackedStore) BackupPreCommand() []string { - return nil -} - -func (s *S3BackedStore) BackupStorageCommand(src []string, host string, spaceID string) []string { - var cmd []string - for i, dir := range src { - storageDir := s.url + "/" + "storage/" + host + "/" + "data" + strconv.Itoa(i) + "/" + spaceID + "/" - cmdStr := "aws " + s.args + " s3 sync " + dir + " " + storageDir - cmd = append(cmd, cmdStr) - } - - return cmd -} - -func (s 
S3BackedStore) BackupMetaCommand(src []string) string { - metaDir := s.url + "/" + "meta/" - return "aws " + s.args + " s3 sync " + filepath.Dir(src[0]) + " " + metaDir -} - -func (s S3BackedStore) BackupMetaDir() string { - return s.url + "/" + "meta" -} - -func (s S3BackedStore) BackupMetaFileCommand(src string) []string { - if len(s.args) == 0 { - return []string{"aws", "s3", "cp", src, s.url + "/"} - } - args := strings.Fields(s.args) - args = append(args, "s3", "cp", src, s.url+"/") - args = append([]string{"aws"}, args...) - return args -} - -func (s S3BackedStore) RestoreMetaFileCommand(file string, dst string) []string { - if len(s.args) == 0 { - return []string{"aws", "s3", "cp", s.url + "/" + file, dst} - } - args := strings.Fields(s.args) - args = append(args, "s3", "cp", s.url+"/"+file, dst) - args = append([]string{"aws"}, args...) - return args -} - -func (s S3BackedStore) RestoreMetaCommand(src []string, dst string) (string, []string) { - metaDir := s.url + "/" + "meta/" - var sstFiles []string - for _, f := range src { - file := dst + "/" + f - sstFiles = append(sstFiles, file) - } - return fmt.Sprintf("aws %s s3 sync %s "+dst, s.args, metaDir), sstFiles -} -func (s S3BackedStore) RestoreStorageCommand(host string, spaceID []string, dst []string) []string { - var cmd []string - for i, d := range dst { - storageDir := s.url + "/storage/" + host + "/" + "data" + strconv.Itoa(i) + "/" - cmdStr := fmt.Sprintf("aws %s s3 sync %s "+d, s.args, storageDir) - cmd = append(cmd, cmdStr) - } - - return cmd -} - -func (s S3BackedStore) RestoreMetaPreCommand(srcDir string, bkDir string) string { - return mvAndMkDirCommand(srcDir, bkDir) -} - -func (s S3BackedStore) RestoreStoragePreCommand(srcDir string, bkDir string) string { - return mvAndMkDirCommand(srcDir, bkDir) -} - -func (s S3BackedStore) RestoreMetaPostCommand(bkDir string) string { - return rmDirCommand(bkDir) -} - -func (s S3BackedStore) RestoreStoragePostCommand(bkDir string) string { - return 
rmDirCommand(bkDir) -} - -func (s S3BackedStore) URI() string { - return s.url -} -func (s S3BackedStore) Scheme() string { - return SCHEME_S3 -} - -func (s S3BackedStore) CheckCommand() string { - return "aws " + s.args + " s3 ls " + s.url -} - -func (s S3BackedStore) ListBackupCommand() ([]string, error) { - output, err := exec.Command("aws", "s3", "ls", s.url).Output() - if err != nil { - return nil, err - } - - var dirs []string - sc := bufio.NewScanner(strings.NewReader(string(output))) - for sc.Scan() { - w := strings.Fields(sc.Text()) - dirs = append(dirs, strings.TrimRight(w[1], "/")) - } - return dirs, nil -} diff --git a/pkg/storage/s3_test.go b/pkg/storage/s3_test.go deleted file mode 100644 index e880dbc..0000000 --- a/pkg/storage/s3_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package storage - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/vesoft-inc/nebula-br/pkg/context" - "go.uber.org/zap" -) - -var logger, _ = zap.NewProduction() - -func TestS3SetBackupName(t *testing.T) { - s3 := NewS3BackendStore("s3://nebulabackup/", logger, 5, "--cli-read-timeout", &context.Context{}) - s3.SetBackupName("backupname1") - assert.Equal(t, s3.backupName, "backupname1") - - assert.Equal(t, s3.URI(), "s3://nebulabackup/backupname1") - - s3 = NewS3BackendStore("s3://nebulabackup", logger, 5, "", &context.Context{}) - s3.SetBackupName("backupname2") - assert.Equal(t, s3.backupName, "backupname2") - - assert.Equal(t, s3.URI(), "s3://nebulabackup/backupname2") -} - -func TestS3StorageCommand(t *testing.T) { - backupRegion := "s3://nebulabackup/" - s3 := NewS3BackendStore(backupRegion, logger, 5, "--cli-read-timeout ", &context.Context{}) - s3.SetBackupName("backupname3") - host := "127.0.0.1" - spaceID := "1" - dataDir := []string{"/home/nebula/"} - cmd := s3.BackupStorageCommand(dataDir, host, spaceID) - dst := s3.URI() + "/storage/" + host + "/" + "data0/" + spaceID + "/" - assert.Equal(t, cmd[0], "aws --cli-read-timeout s3 sync /home/nebula/ 
"+dst) - - cmd = s3.RestoreStorageCommand(host, []string{spaceID}, []string{"/home/data"}) - expectCmd := "aws --cli-read-timeout s3 sync " + s3.URI() + "/storage/" + host + "/data0/" + " /home/data" - assert.Equal(t, cmd[0], expectCmd) -} - -func TestS3MetaCommand(t *testing.T) { - s3 := NewS3BackendStore("s3://nebulabackup", logger, 5, "", &context.Context{}) - s3.SetBackupName("backupmeta") - files := []string{"/data/a.sst", "/data/b.sst", "/data/c.sst"} - cmd := s3.BackupMetaCommand(files) - assert.Equal(t, cmd, "aws s3 sync /data s3://nebulabackup/backupmeta/meta/") - - f := []string{"a.sst", "b.sst", "c.sst"} - cmd, sstFiles := s3.RestoreMetaCommand(f, "/home/data") - assert.Equal(t, cmd, "aws s3 sync s3://nebulabackup/backupmeta/meta/ /home/data") - assert.Equal(t, sstFiles, []string{"/home/data/a.sst", "/home/data/b.sst", "/home/data/c.sst"}) -} - -func TestS3BackupMetaFileCommand(t *testing.T) { - backupRegion := "s3://nebulabackupfile/" - s3 := NewS3BackendStore(backupRegion, logger, 5, "", &context.Context{}) - s3.SetBackupName("backupmetafile") - metaFile := "/home/nebula/backup.meta" - cmd := s3.BackupMetaFileCommand(metaFile) - expectCmd := []string{"aws", "s3", "cp", metaFile, s3.URI() + "/"} - assert.Equal(t, cmd, expectCmd) - - cmd = s3.RestoreMetaFileCommand("backup.meta", "/home/data") - expectCmd = []string{"aws", "s3", "cp", "s3://nebulabackupfile/backupmetafile/backup.meta", "/home/data"} - assert.Equal(t, cmd, expectCmd) -} diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go deleted file mode 100644 index c420703..0000000 --- a/pkg/storage/storage.go +++ /dev/null @@ -1,61 +0,0 @@ -package storage - -import ( - "fmt" - "net/url" - - "github.com/vesoft-inc/nebula-br/pkg/context" - "go.uber.org/zap" -) - -const ( - SCHEME_HDFS = "hdfs" - SCHEME_OSS = "oss" - SCHEME_S3 = "s3" - SCHEME_LOCAL = "local" -) - -type ExternalStorage interface { - SetBackupName(name string) - BackupPreCommand() []string - BackupStorageCommand(src []string, 
host string, spaceID string) []string - BackupMetaDir() string - BackupMetaCommand(src []string) string - BackupMetaFileCommand(src string) []string - RestoreMetaFileCommand(file string, dst string) []string - RestoreMetaCommand(src []string, dst string) (string, []string) - RestoreStorageCommand(host string, spaceID []string, dst []string) []string - - RestoreMetaPreCommand(srcDir string, bkDir string) string - RestoreStoragePreCommand(srcDir string, bkDir string) string - - RestoreMetaPostCommand(bkDir string) string - RestoreStoragePostCommand(bkDir string) string - - CheckCommand() string - ListBackupCommand() ([]string, error) - URI() string - Scheme() string -} - -func NewExternalStorage(storageUrl string, log *zap.Logger, maxConcurrent int, args string, ctx *context.Context) (ExternalStorage, error) { - u, err := url.Parse(storageUrl) - if err != nil { - return nil, err - } - - log.Info("parsed external storage", zap.String("schema", u.Scheme), zap.String("path", u.Path)) - - switch u.Scheme { - case SCHEME_LOCAL: - return NewLocalBackedStore(u.Path, log, maxConcurrent, args, ctx), nil - case SCHEME_S3: - return NewS3BackendStore(storageUrl, log, maxConcurrent, args, ctx), nil - case SCHEME_OSS: - return NewOSSBackendStore(storageUrl, log, maxConcurrent, args, ctx), nil - case SCHEME_HDFS: - return NewHDFSBackendStore(storageUrl, log, maxConcurrent, args, ctx), nil - default: - return nil, fmt.Errorf("Unsupported Backend Storage Types") - } -} diff --git a/pkg/storage/storage_test.go b/pkg/storage/storage_test.go deleted file mode 100644 index 2f84154..0000000 --- a/pkg/storage/storage_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package storage - -import ( - "flag" - "io/ioutil" - "os" - "os/exec" - "reflect" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/vesoft-inc/nebula-br/pkg/context" - "github.com/vesoft-inc/nebula-br/pkg/remote" - "go.uber.org/zap" -) - -func TestStorage(t *testing.T) { - assert := assert.New(t) - logger, _ := 
zap.NewProduction() - s, err := NewExternalStorage("local:///tmp/backup", logger, 5, "", &context.Context{}) - assert.NoError(err) - assert.Equal(reflect.TypeOf(s).String(), "*storage.LocalBackedStore") - - assert.Equal(s.URI(), "/tmp/backup") - - s, err = NewExternalStorage("s3://nebulabackup/", logger, 5, "", &context.Context{}) - assert.NoError(err) - - assert.Equal(s.URI(), "s3://nebulabackup/") - - s, err = NewExternalStorage("oss://nebulabackup/", logger, 5, "", &context.Context{}) - assert.NoError(err) -} - -var userName = flag.String("user", "", "user for test") - -func TestStorageCmdMv(t *testing.T) { - ast := assert.New(t) - logger, _ := zap.NewProduction() - if *userName == "" { - t.Log("should provide username") - return - } - dir := "/tmp/testdir" - - cmd := exec.Command("mkdir", "-p", dir) - err := cmd.Run() - var cli *remote.Client - - ast.Nil(err) - - tname := "test.txt" - tcontent := "some.sample.content" - - file, _ := os.OpenFile(dir+"/"+tname, os.O_RDWR|os.O_CREATE, 0755) - file.WriteString(tcontent) - file.Close() - - bakpath := getBackDir(dir) - t.Logf("dir: %s , bakpath: %s", dir, bakpath) - - cli, err = remote.NewClient("127.0.0.1", *userName, logger) - ast.Nil(err) - - mvCmd := mvDirCommand(dir, bakpath) - err = cli.ExecCommandBySSH(mvCmd) - ast.Nil(err) - - dat, _ := ioutil.ReadFile(bakpath + "/" + tname) - ast.Equal(string(dat), tcontent) -} - -func TestStorageCmdEmpty(t *testing.T) { - ast := assert.New(t) - m1 := mvDirCommand("", "t") - m2 := mvDirCommand("t", "") - m3 := mvDirCommand("", "") - ast.Equal(m1, "") - ast.Equal(m2, "") - ast.Equal(m3, "") - - r1 := rmDirCommand("") - ast.Equal(r1, "") - - k1 := mkDirCommand("") - ast.Equal(k1, "") -} - -func TestRmCmdCheck(t *testing.T) { - ast := assert.New(t) - invalidDsts := []string{ - "/", "/abc/test /", "/data", "/some/sample/path_old_12345 ////", - } - for _, i := range invalidDsts { - res := sanityCheckForRM(i) - t.Logf("check '%s' ==> %v", i, res) - ast.False(res) - } - - validDst 
:= getBackDir("/some/data/path") - res := sanityCheckForRM(validDst) - t.Logf("check '%s' ==> %v", validDst, res) - ast.True(res) -} diff --git a/pkg/utils/hosts.go b/pkg/utils/hosts.go new file mode 100644 index 0000000..5bd63fa --- /dev/null +++ b/pkg/utils/hosts.go @@ -0,0 +1,200 @@ +package utils + +import ( + "fmt" + "strings" + + log "github.com/sirupsen/logrus" + "github.com/vesoft-inc/nebula-go/v2/nebula" + "github.com/vesoft-inc/nebula-go/v2/nebula/meta" +) + +// NebulaHosts group all services(storaged/metad/graphd/listener) and agents by hostname or ip +type NebulaHosts struct { + hosts map[string][]*meta.ServiceInfo // ip -> (agent, [storaged, metad, graphd, listener]) +} + +type HostDir struct { + Host string // ip + Dir string // nebula root dir +} + +func (h *NebulaHosts) String() string { + if h.hosts == nil { + return "nil" + } + + m := make(map[string]string) + for host, services := range h.hosts { + ss := make([]string, 0) + for _, s := range services { + dataPaths := make([]string, 0) + for _, d := range s.GetDir().GetData() { + dataPaths = append(dataPaths, string(d)) + } + ss = append(ss, fmt.Sprintf("%s[%s]: (data: %s, root: %s)", + StringifyAddr(s.GetAddr()), s.GetRole(), strings.Join(dataPaths, ","), string(s.GetDir().GetRoot()))) + } + m[host] = strings.Join(ss, " | ") + } + + return fmt.Sprintf("%v", m) +} + +func (h *NebulaHosts) LoadFrom(resp *meta.ListClusterInfoResp) error { + if resp.Code != nebula.ErrorCode_SUCCEEDED { + return fmt.Errorf("response is not successful, code is %s", resp.GetCode().String()) + } + + h.hosts = resp.GetHostServices() + log.WithField("host info", h.String()).Info("Get cluster topology from the nebula") + return nil +} + +func (h *NebulaHosts) StorageCount() int { + if h.hosts == nil { + return 0 + } + + c := 0 + for _, services := range h.hosts { + for _, s := range services { + if s.Role == meta.HostRole_STORAGE { + c++ + } + } + } + + return c +} + +// StoragePaths counts storage services group by data 
path count +// path count -> services count having same paths count +func (h *NebulaHosts) StoragePaths() map[int]int { + distribute := make(map[int]int) + + for _, services := range h.hosts { + for _, s := range services { + if s.Role == meta.HostRole_STORAGE { + distribute[len(s.Dir.Data)]++ + } + } + } + + return distribute +} + +func (h *NebulaHosts) HasService(addr *nebula.HostAddr) bool { + if addr == nil { + return false + } + + services, ok := h.hosts[addr.GetHost()] + if !ok { + return false + } + + for _, s := range services { + if s.Addr.GetHost() != addr.GetHost() { + log.WithField("should", addr.GetHost()). + WithField("but", s.Addr.GetHost()). + Infof("Wrong address %s in hosts map", StringifyAddr(s.Addr)) + continue + } + + if s.Addr.GetPort() == addr.GetPort() { + return true + } + } + + return false +} + +func (h *NebulaHosts) GetAgentFor(addr *nebula.HostAddr) (*nebula.HostAddr, error) { + if !h.HasService(addr) { + return nil, fmt.Errorf("service %s not found", StringifyAddr(addr)) + } + + services := h.hosts[addr.GetHost()] + for _, s := range services { + if s.Role == meta.HostRole_AGENT { + return s.Addr, nil + } + } + + return nil, fmt.Errorf("do not find agent for service: %s", StringifyAddr(addr)) +} + +func (h *NebulaHosts) GetRootDirs() map[string][]*HostDir { + hostRoots := make(map[string][]*HostDir) + for host, services := range h.hosts { + dirSet := make(map[string]bool) + for _, s := range services { + if s.Dir != nil && s.Dir.Root != nil { + if len(s.Dir.Root) != 0 { + dirSet[string(s.Dir.Root)] = true + } + } + } + + var dirs []*HostDir + for d := range dirSet { + dirs = append(dirs, &HostDir{host, d}) + } + hostRoots[host] = dirs + } + + return hostRoots +} + +func (h *NebulaHosts) GetAgents() []*nebula.HostAddr { + var al []*nebula.HostAddr + for _, services := range h.hosts { + for _, s := range services { + if s.Role == meta.HostRole_AGENT { + al = append(al, s.Addr) + } + } + } + + return al +} + +func (h *NebulaHosts) 
GetMetas() []*meta.ServiceInfo { + var sl []*meta.ServiceInfo + for _, services := range h.hosts { + for _, s := range services { + if s.Role == meta.HostRole_META { + sl = append(sl, s) + } + } + } + + return sl +} + +func (h *NebulaHosts) GetStorages() []*meta.ServiceInfo { + var sl []*meta.ServiceInfo + for _, services := range h.hosts { + for _, s := range services { + if s.Role == meta.HostRole_STORAGE { + sl = append(sl, s) + } + } + } + + return sl +} + +func (h *NebulaHosts) GetGraphs() []*meta.ServiceInfo { + var gl []*meta.ServiceInfo + for _, services := range h.hosts { + for _, s := range services { + if s.Role == meta.HostRole_GRAPH { + gl = append(gl, s) + } + } + } + + return gl +} diff --git a/pkg/utils/hosts_test.go b/pkg/utils/hosts_test.go new file mode 100644 index 0000000..4780328 --- /dev/null +++ b/pkg/utils/hosts_test.go @@ -0,0 +1,121 @@ +package utils + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/vesoft-inc/nebula-go/v2/nebula" + "github.com/vesoft-inc/nebula-go/v2/nebula/meta" +) + +var ( + nebulaRoot = []byte("/home/nebula/nebula-install") + nebulaMeta = [][]byte{ + []byte("/home/nebula/nebula-install/data/meta"), + } + nebulaStorage = [][]byte{ + []byte("/home/nebula/nebula-install/data/storage"), + } +) + +func parseAddrNoErr(t *testing.T, addrStr string) *nebula.HostAddr { + assert := assert.New(t) + addr, err := ParseAddr(addrStr) + assert.Nil(err, "parse address from string failed", err) + return addr +} + +func TestHosts(t *testing.T) { + assert := assert.New(t) + + localHost := "127.0.0.1" + metaAddr := parseAddrNoErr(t, "127.0.0.1:9559") + graphAddr := parseAddrNoErr(t, "127.0.0.1:9669") + storageAddr := parseAddrNoErr(t, "127.0.0.1:9779") + agentAddr := parseAddrNoErr(t, "127.0.0.1:8888") + host2 := "127.0.0.2" + metaAddr2 := parseAddrNoErr(t, "127.0.0.2:9559") + + metad := &meta.ServiceInfo{ + Dir: nebula.NewDirInfo().SetData(nebulaMeta).SetRoot(nebulaRoot), + Role: meta.HostRole_META, + Addr: 
metaAddr, + } + graphd := &meta.ServiceInfo{ + Dir: nebula.NewDirInfo().SetRoot(nebulaRoot), + Role: meta.HostRole_GRAPH, + Addr: graphAddr, + } + storaged := &meta.ServiceInfo{ + Dir: nebula.NewDirInfo().SetData(nebulaStorage).SetRoot(nebulaRoot), + Role: meta.HostRole_STORAGE, + Addr: storageAddr, + } + agent := &meta.ServiceInfo{ + Dir: nebula.NewDirInfo().SetRoot(nebulaRoot), + Role: meta.HostRole_AGENT, + Addr: agentAddr, + } + metad2 := &meta.ServiceInfo{ + Dir: nebula.NewDirInfo().SetData(nebulaMeta).SetRoot(nebulaRoot), + Role: meta.HostRole_META, + Addr: metaAddr2, + } + + resp := &meta.ListClusterInfoResp{ + HostServices: map[string][]*meta.ServiceInfo{ + localHost: { + metad, + graphd, + storaged, + agent, + }, + host2: { + metad2, + }, + }, + Code: nebula.ErrorCode_SUCCEEDED, + } + + h := &NebulaHosts{} + err := h.LoadFrom(resp) + assert.Nil(err, "Load from list cluster info response failed", err) + + // topology check logic + assert.Equal(h.StorageCount(), 1) + assert.Equal(h.StoragePaths(), map[int]int{1: 1}) + + // check service + assert.True(h.HasService(metaAddr)) + assert.True(h.HasService(metaAddr2)) + assert.True(h.HasService(graphAddr)) + assert.True(h.HasService(storageAddr)) + assert.True(h.HasService(agentAddr)) + + // check service's agent + addr1, err := h.GetAgentFor(metaAddr) + assert.Nil(err) + assert.Equal(addr1, agentAddr) + addr2, err := h.GetAgentFor(graphAddr) + assert.Nil(err) + assert.Equal(addr2, agentAddr) + addr3, err := h.GetAgentFor(agentAddr) + assert.Nil(err) + assert.Equal(addr3, agentAddr) + + addr4, err := h.GetAgentFor(metaAddr2) + assert.NotNil(err) + assert.Nil(addr4) + + // check dirs + assert.Equal(h.GetRootDirs(), map[string][]*HostDir{ + localHost: {&HostDir{localHost, string(nebulaRoot)}}, + host2: {&HostDir{host2, string(nebulaRoot)}}, + }) + + // check service list + assert.Equal(h.GetAgents(), []*nebula.HostAddr{agentAddr}) + assert.Equal(h.GetMetas(), []*meta.ServiceInfo{metad, metad2}) + 
assert.Equal(h.GetGraphs(), []*meta.ServiceInfo{graphd}) + assert.Equal(h.GetStorages(), []*meta.ServiceInfo{storaged}) +} diff --git a/pkg/utils/stringify.go b/pkg/utils/stringify.go new file mode 100644 index 0000000..862300c --- /dev/null +++ b/pkg/utils/stringify.go @@ -0,0 +1,73 @@ +package utils + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/vesoft-inc/nebula-go/v2/nebula" + "github.com/vesoft-inc/nebula-go/v2/nebula/meta" +) + +func StringifyAddr(addr *nebula.HostAddr) string { + if addr == nil { + return "nil" + } + return fmt.Sprintf("%s:%d", addr.GetHost(), addr.GetPort()) +} + +func ParseAddr(addrStr string) (*nebula.HostAddr, error) { + ipAddr := strings.Split(addrStr, ":") + if len(ipAddr) != 2 { + return nil, fmt.Errorf("bad format: %s", addrStr) + } + + port, err := strconv.ParseInt(ipAddr[1], 10, 32) + if err != nil { + return nil, fmt.Errorf("bad format: %s", addrStr) + } + + return &nebula.HostAddr{ipAddr[0], nebula.Port(port)}, nil +} + +func StringifyBackup(b *meta.BackupMeta) string { + m := make(map[string]string) + m["backup name"] = string(b.GetBackupName()) + m["created time"] = time.Unix(b.GetCreateTime()/1000, 0).Local().String() + m["all spaces"] = fmt.Sprintf("%v", b.GetAllSpaces()) + m["full backup"] = fmt.Sprintf("%v", b.GetFull()) + + s := make([]string, 0, len(b.GetMetaFiles())) + for _, f := range b.GetMetaFiles() { + s = append(s, string(f)) + } + m["meta files"] = strings.Join(s, ",") + + s = make([]string, 0, len(b.GetSpaceBackups())) + for sid, backup := range b.GetSpaceBackups() { + s = append(s, fmt.Sprintf("%s: space-id %d, hosts: %d", backup.GetSpace().GetSpaceName(), sid, len(backup.GetHostBackups()))) + } + m["backups"] = strings.Join(s, ";") + + return fmt.Sprintf("%v", m) +} + +func StringifyClusterInfo(info *meta.ListClusterInfoResp) string { + m := make(map[string]string) + + for host, services := range info.GetHostServices() { + ss := make([]string, 0) + for _, s := range services { + dataPaths 
:= make([]string, 0) + for _, d := range s.GetDir().GetData() { + dataPaths = append(dataPaths, string(d)) + } + ss = append(ss, fmt.Sprintf("%s[%s]: (data: %s, root: %s)", + StringifyAddr(s.GetAddr()), s.GetRole(), strings.Join(dataPaths, ","), string(s.GetDir().GetRoot()))) + } + m[host] = strings.Join(ss, " | ") + } + + return fmt.Sprintf("%v", m) +} diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index a1342d0..8de678e 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -2,21 +2,19 @@ package utils import ( "fmt" + "net/url" "os" + "path" + "strings" "github.com/facebook/fbthrift/thrift/lib/go/thrift" - "github.com/vesoft-inc/nebula-go/v2/nebula" "github.com/vesoft-inc/nebula-go/v2/nebula/meta" - "go.uber.org/zap" ) -func PutMetaToFile(logger *zap.Logger, meta *meta.BackupMeta, filename string) error { +func DumpMetaToFile(meta *meta.BackupMeta, filename string) error { file, err := os.OpenFile(filename, os.O_TRUNC|os.O_CREATE|os.O_RDWR, 0644) if err != nil { - logger.Error("store backupmeta failed in open file", - zap.String("filename", filename), - zap.String("error", err.Error())) - return err + return fmt.Errorf("open file %s failed: %w", filename, err) } defer file.Close() @@ -30,23 +28,17 @@ func PutMetaToFile(logger *zap.Logger, meta *meta.BackupMeta, filename string) e err = meta.Write(binaryOut) if err != nil { - logger.Error("store backupmeta failed in write", - zap.String("filename", filename), - zap.String("error", err.Error())) - return err + return fmt.Errorf("write backup meta to %s failed: %w", filename, err) } binaryOut.Flush() return nil } -func GetMetaFromFile(logger *zap.Logger, filename string) (*meta.BackupMeta, error) { +func ParseMetaFromFile(filename string) (*meta.BackupMeta, error) { file, err := os.OpenFile(filename, os.O_RDONLY, 0644) if err != nil { - logger.Error("get backupmeta failed in open file", - zap.String("filename", filename), - zap.String("error", err.Error())) - return nil, err + return nil, 
fmt.Errorf("open file %s failed: %w", filename, err) } defer file.Close() @@ -60,46 +52,50 @@ func GetMetaFromFile(logger *zap.Logger, filename string) (*meta.BackupMeta, err m := meta.NewBackupMeta() err = m.Read(binaryIn) if err != nil { - logger.Error("get backupmeta failed in read", zap.String("filename", filename), zap.String("error", err.Error())) - return nil, err + return nil, fmt.Errorf("read from backup meta: %s failed: %w", filename, err) } return m, nil } -type BackupMetaOperator interface { - OprSBI(nebula.GraphSpaceID, *meta.SpaceBackupInfo) - OprBI(*meta.BackupInfo) - OprCKP(*nebula.CheckpointInfo) - OprPtBi(*nebula.PartitionBackupInfo) -} +const ( + LocalTmpDir = "/tmp/nebula-br" +) -type ShowBackupMeta struct { +func EnsureDir(dir string) error { + err := os.MkdirAll(dir, os.ModePerm) + if err != nil { + return fmt.Errorf("ensure dirs %s failed: %w", dir, err) + } + return nil } -func (s ShowBackupMeta) OprSBI(sid nebula.GraphSpaceID, m *meta.SpaceBackupInfo) { - fmt.Printf("space.id: %d .name: %s\n", sid, m.Space.SpaceName) -} -func (s ShowBackupMeta) OprBI(m *meta.BackupInfo) { - fmt.Printf("backupinfo.host: %s\n", m.Host.String()) +func RemoveDir(dir string) error { + err := os.RemoveAll(dir) + if err != nil { + return fmt.Errorf("remove tmp dirs %s failed: %w", dir, err) + } + return nil } -func (s ShowBackupMeta) OprCKP(m *nebula.CheckpointInfo) { - fmt.Printf("ckp.path: %s\n", string(m.Path)) + +func IsBackupName(path string) bool { + return strings.HasPrefix(path, "BACKUP") } -func (s ShowBackupMeta) OprPtBi(m *nebula.PartitionBackupInfo) { - for k, _ := range m.Info { - fmt.Printf("partid: %d\n", k) + +func UriJoin(elem ...string) (string, error) { + if len(elem) == 0 { + return "", fmt.Errorf("empty paths") } -} -func IterateBackupMeta(m map[nebula.GraphSpaceID]*meta.SpaceBackupInfo, bmo BackupMetaOperator) { - for k, v := range m { // k: nebula.GraphSpaceID, v: *meta.SpaceBackupInfo - bmo.OprSBI(k, v) - for _, binf := range v.Info { // 
bidx: int, binf: *meta.BackupInfo - bmo.OprBI(binf) - for _, ckp := range binf.Info { //ckp: *nebula.CheckpointInfo - bmo.OprCKP(ckp) - bmo.OprPtBi(ckp.PartitionInfo) - } - } + if len(elem) == 1 { + return elem[0], nil } + + u, err := url.Parse(elem[0]) + if err != nil { + return "", fmt.Errorf("parse base uri %s failed: %w", elem[0], err) + } + + elem[0] = u.Path + u.Path = path.Join(elem...) + return u.String(), nil } diff --git a/pkg/utils/utils_test.go b/pkg/utils/utils_test.go index f297b1d..3ab2de8 100644 --- a/pkg/utils/utils_test.go +++ b/pkg/utils/utils_test.go @@ -1,30 +1,59 @@ package utils import ( - "flag" - "fmt" + "path/filepath" + "reflect" "testing" + "time" "github.com/stretchr/testify/assert" - "github.com/vesoft-inc/nebula-br/pkg/metaclient" - "go.uber.org/zap" + "github.com/vesoft-inc/nebula-go/v2/nebula/meta" ) -var metafile = flag.String("meta", "", "metafile for test") +func TestDumpParseBackup(t *testing.T) { + assert := assert.New(t) -func TestIterateMetaBackup(t *testing.T) { - ast := assert.New(t) - metafname := *metafile - logger, _ := zap.NewProduction() - if metafname == "" { - t.Log("meta should be provided!") - return + files := [][]byte{ + []byte("__edges__.sst"), + []byte("__index__.sst"), + []byte("__tags__.sst"), } + backup := &meta.BackupMeta{ + SpaceBackups: make(map[int32]*meta.SpaceBackupInfo), + MetaFiles: files, + BackupName: []byte("backup_test"), + Full: true, + AllSpaces: true, + CreateTime: time.Now().Unix(), + } + + err := EnsureDir(LocalTmpDir) + assert.Nil(err, "Ensure local tmp dir failed", err) + defer func() { + err := RemoveDir(LocalTmpDir) + assert.Nil(err, "Remove local tmp dir failed", err) + }() + + tmpPath := filepath.Join(LocalTmpDir, "backup.meta") + err = DumpMetaToFile(backup, tmpPath) + assert.Nil(err, "Dump backup meta to file failed", err) + + backup1, err := ParseMetaFromFile(tmpPath) + assert.Nil(err, "Parse backup meta from file failed", err) + + assert.True(reflect.DeepEqual(backup, backup1), 
"Backup meta are not consistent after dump and parse") +} - m, err := GetMetaFromFile(logger, metafname) - ast.Nil(err) +func TestUriJoin(t *testing.T) { + assert := assert.New(t) - IterateBackupMeta(m.GetBackupInfo(), ShowBackupMeta{}) + root := "local://backup" + uri, err := UriJoin(root, "BACKUP_NAME") + assert.Nil(err, "Join uri failed", err) + assert.Equal(uri, "local://backup/BACKUP_NAME", "Uri join does not work as expected") - fmt.Printf("%s\n", metaclient.BackupMetaToString(m)) + root = "s3://backup" + uri, err = UriJoin(root, "root", "BACKUP_NAME") + assert.Nil(err, "Join uri failed", err) + assert.Equal(uri, "s3://backup/root/BACKUP_NAME", "Uri join does not work as expected") }