diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index f65f9bcd..25c79079 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -32,15 +32,149 @@ on: - 'deployment/**' - '**/*.go' - '**/go.mod' - - '**/go.sum' + - '**/go.sum' branches: - main workflow_dispatch: schedule: - cron: '0 0 * * *' +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true jobs: + test-backup-restore-cross-version: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + deploy_tools: [docker-compose] + milvus_mode: [standalone] + another_milvus_mode: [standalone] + # mq_type: [pulsar, kafka] # TODO: add pulsar and kafka + + steps: + - uses: actions/checkout@v3 + + - name: Set up Python 3.8 + uses: actions/setup-python@v2 + with: + python-version: 3.8 + cache: pip + + - uses: actions/setup-go@v3 + with: + go-version: '1.18.0' + cache: true + + - name: Build + timeout-minutes: 5 + shell: bash + run: | + go get + go build + - name: Install dependency + + timeout-minutes: 5 + working-directory: tests + shell: bash + run: | + pip install -r requirements.txt --trusted-host https://test.pypi.org + + - name: Milvus deploy + + timeout-minutes: 15 + shell: bash + working-directory: deployment/${{ matrix.milvus_mode }} + run: | + yq -i '.services.standalone.image="milvusdb/milvus:latest"' docker-compose.yml + docker-compose up -d + bash ../../scripts/check_healthy.sh + docker-compose ps -a + + - name: Prepare data + timeout-minutes: 5 + shell: bash + run: | + python example/prepare_data.py + + - name: Backup + timeout-minutes: 5 + shell: bash + run: | + ./milvus-backup check + ./milvus-backup list + ./milvus-backup create -n my_backup + ./milvus-backup list + - name: Restore backup + timeout-minutes: 5 + shell: bash + run: | + ./milvus-backup restore -n my_backup -s _recover + - name: Verify data + timeout-minutes: 5 + shell: bash + run: | + python example/verify_data.py + + - name: Save Backup + timeout-minutes: 5 + shell: bash + run: | + sudo cp -r deployment/${{ matrix.milvus_mode }}/volumes/minio/a-bucket/backup ${{ matrix.milvus_mode }}-backup + + - name: delete backup + timeout-minutes: 5 + shell: bash + run: | + ./milvus-backup delete -n my_backup + ./milvus-backup list + + - name: Uninstall Milvus + timeout-minutes: 5 + shell: bash + working-directory: deployment/${{ matrix.milvus_mode }} + run: | + docker-compose down + sudo rm -rf volumes + + - name: Deploy Another Milvus + timeout-minutes: 15 + shell: bash + working-directory: deployment/${{ matrix.another_milvus_mode }} + run: | + yq -i '.services.standalone.image="milvusdb/milvus:2.2.0-latest"' docker-compose.yml + docker-compose up -d + bash ../../scripts/check_healthy.sh + docker-compose ps -a + + - name: Copy Backup to Another Milvus + timeout-minutes: 5 + shell: bash + run: | + sudo mkdir -p deployment/${{ matrix.another_milvus_mode }}/volumes/minio/a-bucket/backup + sudo cp -r ${{ matrix.milvus_mode }}-backup/my_backup deployment/${{ matrix.another_milvus_mode }}/volumes/minio/a-bucket/backup + + - name: List backup from another Milvus + timeout-minutes: 5 + working-directory: deployment/${{ matrix.another_milvus_mode }}/volumes/minio + shell: bash + run: | + pwd + ls -l + tree + - name: Restore backup from another Milvus + timeout-minutes: 5 + shell: bash + run: | + ./milvus-backup restore -n my_backup -s _recover + - name: Verify data from another Milvus + timeout-minutes: 5 + shell: bash + run: | + python example/verify_data.py + 
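Editor's note: the cross-version job above drives the tool purely through the CLI (`check`, `create -n my_backup`, `restore -n my_backup -s _recover`, then verification). The same flow can be driven programmatically through the `core` package, as the tests touched later in this diff do. A minimal sketch, assuming a reachable Milvus and a valid config file; the config path and backup name are illustrative:

```go
package main

import (
	"context"
	"fmt"

	"github.com/zilliztech/milvus-backup/core"
	"github.com/zilliztech/milvus-backup/core/paramtable"
	"github.com/zilliztech/milvus-backup/core/proto/backuppb"
)

func main() {
	// Load the same YAML config the CLI reads (path is illustrative).
	var params paramtable.BackupParams
	params.GlobalInitWithYaml("backup.yaml")
	params.Init()

	ctx := context.Background()
	backupCtx := core.CreateBackupContext(ctx, params)

	// Mirrors `./milvus-backup create -n my_backup` in the job above.
	createResp := backupCtx.CreateBackup(ctx, &backuppb.CreateBackupRequest{
		BackupName: "my_backup",
	})
	fmt.Println(createResp.GetMsg())

	// Mirrors `./milvus-backup restore -n my_backup -s _recover`.
	restoreResp := backupCtx.RestoreBackup(ctx, &backuppb.RestoreBackupRequest{
		BackupName:       "my_backup",
		CollectionSuffix: "_recover",
	})
	fmt.Println(restoreResp.GetMsg())
}
```

`CollectionSuffix` corresponds to the CLI's `-s` flag, so the restored collections land next to the originals under the `_recover` suffix rather than overwriting them.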
test-backup-restore-cli: runs-on: ubuntu-latest strategy: @@ -50,7 +184,7 @@ jobs: milvus_mode: [standalone, cluster] another_milvus_mode: [standalone, cluster] # mq_type: [pulsar, kafka] # TODO: add pulsar and kafka - + steps: - uses: actions/checkout@v3 @@ -64,7 +198,7 @@ jobs: with: go-version: '1.18.0' cache: true - + - name: Build timeout-minutes: 5 shell: bash @@ -72,15 +206,15 @@ jobs: go get go build - name: Install dependency - + timeout-minutes: 5 working-directory: tests shell: bash run: | pip install -r requirements.txt --trusted-host https://test.pypi.org - + - name: Milvus deploy - + timeout-minutes: 15 shell: bash working-directory: deployment/${{ matrix.milvus_mode }} @@ -88,17 +222,18 @@ jobs: docker-compose up -d bash ../../scripts/check_healthy.sh docker-compose ps -a - + - name: Prepare data timeout-minutes: 5 shell: bash run: | python example/prepare_data.py - + - name: Backup timeout-minutes: 5 shell: bash run: | + ./milvus-backup check ./milvus-backup list ./milvus-backup create -n my_backup ./milvus-backup list @@ -111,14 +246,14 @@ jobs: timeout-minutes: 5 shell: bash run: | - python example/verify_data.py + python example/verify_data.py - name: Save Backup timeout-minutes: 5 shell: bash run: | sudo cp -r deployment/${{ matrix.milvus_mode }}/volumes/minio/a-bucket/backup ${{ matrix.milvus_mode }}-backup - + - name: delete backup timeout-minutes: 5 shell: bash @@ -133,7 +268,7 @@ jobs: run: | docker-compose down sudo rm -rf volumes - + - name: Deploy Another Milvus timeout-minutes: 15 shell: bash @@ -142,14 +277,14 @@ jobs: docker-compose up -d bash ../../scripts/check_healthy.sh docker-compose ps -a - + - name: Copy Backup to Another Milvus timeout-minutes: 5 shell: bash run: | sudo mkdir -p deployment/${{ matrix.another_milvus_mode }}/volumes/minio/a-bucket/backup sudo cp -r ${{ matrix.milvus_mode }}-backup/my_backup deployment/${{ matrix.another_milvus_mode }}/volumes/minio/a-bucket/backup - + - name: List backup from another Milvus timeout-minutes: 5 working-directory: deployment/${{ matrix.another_milvus_mode }}/volumes/minio @@ -167,15 +302,143 @@ jobs: timeout-minutes: 5 shell: bash run: | - python example/verify_data.py + python example/verify_data.py + + test-backup-restore-with-custom-config: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + deploy_tools: [helm] + milvus_mode: [standalone] + milvus_minio_rootpath: ["", "file"] + backup_bucket_name: ["milvus-backup", "milvus-bucket"] + + steps: + - uses: actions/checkout@v3 + + - name: Set up Python 3.8 + uses: actions/setup-python@v2 + with: + python-version: 3.8 + cache: pip + + - uses: actions/setup-go@v3 + with: + go-version: '1.18.0' + cache: true + + - name: Creating kind cluster + if: ${{ matrix.deploy_tools == 'helm' }} + uses: helm/kind-action@v1.2.0 + + - name: Modify Milvus config + + timeout-minutes: 15 + shell: bash + working-directory: deployment/${{ matrix.milvus_mode }} + run: | + yq -i '.minio.rootPath = "${{ matrix.milvus_minio_rootpath }}"' values.yaml + - name: Build + timeout-minutes: 5 + shell: bash + run: | + if [ ${{ matrix.deploy_tools }} == 'helm' ]; then + yq -i '.minio.bucketName = "milvus-bucket"' configs/backup.yaml + yq -i '.minio.rootPath = "${{ matrix.milvus_minio_rootpath }}"' configs/backup.yaml + yq -i '.minio.backupBucketName = "${{ matrix.backup_bucket_name }}"' configs/backup.yaml + + fi + yq -i '.log.level = "debug"' configs/backup.yaml + cat configs/backup.yaml || true + go get + go build + - name: Install dependency + + timeout-minutes: 5 + 
working-directory: tests + shell: bash + run: | + pip install -r requirements.txt --trusted-host https://test.pypi.org + - name: Milvus deploy + + timeout-minutes: 15 + shell: bash + working-directory: deployment/${{ matrix.milvus_mode }} + run: | + if [ ${{ matrix.deploy_tools}} == "helm" ]; then + helm repo add milvus https://milvus-io.github.io/milvus-helm + helm repo update + helm install --wait --timeout 600s milvus-backup milvus/milvus -f values.yaml + kubectl get pods + kubectl port-forward service/milvus-backup 19530 >/dev/null 2>&1 & + kubectl port-forward service/milvus-backup-minio 9000 >/dev/null 2>&1 & + sleep 10 + nc -vz 127.0.0.1 19530 + nc -vz 127.0.0.1 9000 + sleep 10 + kubectl get pods -n default | grep milvus-backup + fi + + if [ ${{ matrix.deploy_tools}} == "docker-compose" ]; then + docker-compose up -d + bash ../../scripts/check_healthy.sh + docker-compose ps -a + fi + - name: Prepare data + timeout-minutes: 5 + shell: bash + run: | + python example/prepare_data.py + - name: Backup + timeout-minutes: 5 + shell: bash + run: | + ./milvus-backup check + ./milvus-backup list + ./milvus-backup create -n my_backup + ./milvus-backup list + - name: Restore backup + timeout-minutes: 5 + shell: bash + run: | + ./milvus-backup restore -n my_backup -s _recover + - name: Verify data + timeout-minutes: 5 + shell: bash + run: | + python example/verify_data.py + - name: Export logs + if: ${{ always() }} + shell: bash + working-directory: deployment/${{ matrix.milvus_mode }} + run: | + if [ ${{ matrix.deploy_tools}} == "helm" ]; then + bash ../../scripts/export_log_k8s.sh default milvus-backup logs + fi + + if [ ${{ matrix.deploy_tools}} == "docker-compose" ]; then + bash ../../scripts/export_log_docker.sh logs + fi + - name: Upload logs + if: ${{ ! success() }} + uses: actions/upload-artifact@v2 + with: + name: custom-config-${{ matrix.deploy_tools }}-${{ matrix.milvus_mode }} + path: | + ./logs + ./server.log + /tmp/ci_logs + deployment/${{ matrix.milvus_mode }}/logs test-backup-restore-api: runs-on: ubuntu-latest strategy: fail-fast: false matrix: - deploy_tools: [docker-compose, helm] - milvus_mode: [standalone, cluster] + deploy_tools: [docker-compose] + milvus_mode: [standalone] + case_tag: [L0, L1] exclude: - deploy_tools: helm milvus_mode: cluster @@ -239,7 +502,7 @@ jobs: sleep 10 kubectl get pods -n default | grep milvus-backup fi - + if [ ${{ matrix.deploy_tools}} == "docker-compose" ]; then docker-compose up -d bash ../../scripts/check_healthy.sh @@ -253,11 +516,11 @@ jobs: ./milvus-backup server > server.log 2>&1 & - name: Run test - timeout-minutes: 60 + timeout-minutes: 120 shell: bash working-directory: tests run: | - pytest -s -v --log-cli-level=INFO --capture=no --tags L0 L1 + pytest -s -v --tags ${{ matrix.case_tag }} -n 4 - name: Get Milvus status shell: bash @@ -265,7 +528,7 @@ jobs: run: | docker-compose ps -a || true kubectl get pods -n default || true - + - name: Export logs if: ${{ always() }} shell: bash @@ -274,16 +537,16 @@ jobs: if [ ${{ matrix.deploy_tools}} == "helm" ]; then bash ../../scripts/export_log_k8s.sh default milvus-backup logs fi - + if [ ${{ matrix.deploy_tools}} == "docker-compose" ]; then bash ../../scripts/export_log_docker.sh logs - fi - + fi + - name: Upload logs if: ${{ ! 
success() }} uses: actions/upload-artifact@v2 with: - name: logs-${{ matrix.deploy_tools }}-${{ matrix.milvus_mode }} + name: api-test-logs-${{ matrix.deploy_tools }}-${{ matrix.milvus_mode }} path: | ./logs ./server.log diff --git a/.github/workflows/nightly.yaml b/.github/workflows/nightly.yaml new file mode 100644 index 00000000..f1dcb5f5 --- /dev/null +++ b/.github/workflows/nightly.yaml @@ -0,0 +1,127 @@ +name: Nightly Test + +on: + workflow_dispatch: + schedule: + - cron: '0 2 * * *' + +jobs: + test-backup-restore-api: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + deploy_tools: [docker-compose] + milvus_mode: [standalone] + milvus_version: [master-latest, 2.3.0-latest, 2.2.0-latest] + + steps: + - uses: actions/checkout@v3 + + - name: Set up Python 3.8 + uses: actions/setup-python@v2 + with: + python-version: 3.8 + cache: pip + + - uses: actions/setup-go@v3 + with: + go-version: '1.18.0' + cache: true + + - name: Creating kind cluster + if: ${{ matrix.deploy_tools == 'helm' }} + uses: helm/kind-action@v1.2.0 + + - name: Build + timeout-minutes: 5 + shell: bash + run: | + if [ ${{ matrix.deploy_tools }} == 'helm' ]; then + yq -i '.minio.bucketName = "milvus-bucket"' configs/backup.yaml + yq -i '.minio.backupBucketName = "milvus-bucket"' configs/backup.yaml + yq -i '.minio.rootPath = "file"' configs/backup.yaml + fi + yq -i '.log.level = "debug"' configs/backup.yaml + cat configs/backup.yaml || true + go get + go build + - name: Install dependency + + timeout-minutes: 5 + working-directory: tests + shell: bash + run: | + pip install -r requirements.txt --trusted-host https://test.pypi.org + + - name: Milvus deploy + + timeout-minutes: 15 + shell: bash + working-directory: deployment/${{ matrix.milvus_mode }} + run: | + if [ ${{ matrix.deploy_tools }} == "helm" ]; then + helm repo add milvus https://milvus-io.github.io/milvus-helm + helm repo update + helm install --wait --timeout 600s milvus-backup milvus/milvus --set image.all.tag=${{ matrix.milvus_version }} -f values.yaml + kubectl get pods + kubectl port-forward service/milvus-backup 19530 >/dev/null 2>&1 & + kubectl port-forward service/milvus-backup-minio 9000 >/dev/null 2>&1 & + sleep 10 + nc -vz 127.0.0.1 19530 + nc -vz 127.0.0.1 9000 + sleep 10 + kubectl get pods -n default | grep milvus-backup + fi + + if [ ${{ matrix.deploy_tools}} == "docker-compose" ]; then + yq -i '.services.standalone.image= "milvusdb/milvus:${{ matrix.milvus_version }}"' docker-compose.yml + cat docker-compose.yml + docker-compose up -d + bash ../../scripts/check_healthy.sh + docker-compose ps -a + fi + + - name: Start server + timeout-minutes: 5 + shell: bash + run: | + ./milvus-backup server > server.log 2>&1 & + + - name: Run test + timeout-minutes: 120 + shell: bash + working-directory: tests + run: | + pytest -s -v --tags L0, L1, L2, L3 + + - name: Get Milvus status + shell: bash + working-directory: deployment/${{ matrix.milvus_mode }} + run: | + docker-compose ps -a || true + kubectl get pods -n default || true + + - name: Export logs + if: ${{ always() }} + shell: bash + working-directory: deployment/${{ matrix.milvus_mode }} + run: | + if [ ${{ matrix.deploy_tools}} == "helm" ]; then + bash ../../scripts/export_log_k8s.sh default milvus-backup logs + fi + + if [ ${{ matrix.deploy_tools}} == "docker-compose" ]; then + bash ../../scripts/export_log_docker.sh logs + fi + + - name: Upload logs + if: ${{ ! 
success() }} + uses: actions/upload-artifact@v2 + with: + name: logs-${{ matrix.deploy_tools }}-${{ matrix.milvus_mode }} + path: | + ./logs + ./server.log + /tmp/ci_logs + deployment/${{ matrix.milvus_mode }}/logs diff --git a/.github/workflows/perf.yaml b/.github/workflows/perf.yaml new file mode 100644 index 00000000..7dc524d7 --- /dev/null +++ b/.github/workflows/perf.yaml @@ -0,0 +1,125 @@ +name: Perf Test + +on: + workflow_dispatch: + schedule: + - cron: '0 2 * * *' + +jobs: + test-backup-restore-api: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + deploy_tools: [docker-compose] + milvus_mode: [standalone] + milvus_version: [master-latest, 2.3.0-latest, 2.2.0-latest] + + steps: + - uses: actions/checkout@v3 + + - name: Set up Python 3.8 + uses: actions/setup-python@v2 + with: + python-version: 3.8 + cache: pip + + - uses: actions/setup-go@v3 + with: + go-version: '1.18.0' + cache: true + + - name: Creating kind cluster + if: ${{ matrix.deploy_tools == 'helm' }} + uses: helm/kind-action@v1.2.0 + + - name: Build + timeout-minutes: 5 + shell: bash + run: | + if [ ${{ matrix.deploy_tools }} == 'helm' ]; then + yq -i '.minio.bucketName = "milvus-bucket"' configs/backup.yaml + yq -i '.minio.backupBucketName = "milvus-bucket"' configs/backup.yaml + yq -i '.minio.rootPath = "file"' configs/backup.yaml + fi + yq -i '.log.level = "debug"' configs/backup.yaml + cat configs/backup.yaml || true + go get + go build + - name: Install dependency + + timeout-minutes: 5 + working-directory: tests + shell: bash + run: | + pip install -r requirements.txt --trusted-host https://test.pypi.org + + - name: Milvus deploy + + timeout-minutes: 15 + shell: bash + working-directory: deployment/${{ matrix.milvus_mode }} + run: | + if [ ${{ matrix.deploy_tools }} == "helm" ]; then + helm repo add milvus https://milvus-io.github.io/milvus-helm + helm repo update + helm install --wait --timeout 600s milvus-backup milvus/milvus --set image.all.tag=${{ matrix.milvus_version }} -f values.yaml + kubectl get pods + kubectl port-forward service/milvus-backup 19530 >/dev/null 2>&1 & + kubectl port-forward service/milvus-backup-minio 9000 >/dev/null 2>&1 & + sleep 10 + nc -vz 127.0.0.1 19530 + nc -vz 127.0.0.1 9000 + sleep 10 + kubectl get pods -n default | grep milvus-backup + fi + + if [ ${{ matrix.deploy_tools}} == "docker-compose" ]; then + docker-compose up -d + bash ../../scripts/check_healthy.sh + docker-compose ps -a + fi + + - name: Start server + timeout-minutes: 5 + shell: bash + run: | + ./milvus-backup server > server.log 2>&1 & + + - name: Run test + timeout-minutes: 120 + shell: bash + working-directory: tests + run: | + pytest -s -v --tags Perf + + - name: Get Milvus status + shell: bash + working-directory: deployment/${{ matrix.milvus_mode }} + run: | + docker-compose ps -a || true + kubectl get pods -n default || true + + - name: Export logs + if: ${{ always() }} + shell: bash + working-directory: deployment/${{ matrix.milvus_mode }} + run: | + if [ ${{ matrix.deploy_tools}} == "helm" ]; then + bash ../../scripts/export_log_k8s.sh default milvus-backup logs + fi + + if [ ${{ matrix.deploy_tools}} == "docker-compose" ]; then + bash ../../scripts/export_log_docker.sh logs + fi + + - name: Upload logs + if: ${{ ! 
success() }} + uses: actions/upload-artifact@v2 + with: + name: logs-${{ matrix.deploy_tools }}-${{ matrix.milvus_mode }} + path: | + ./logs + ./server.log + /tmp/ci_logs + deployment/${{ matrix.milvus_mode }}/logs diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 9e4a28c1..ee24d4e7 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -28,4 +28,5 @@ jobs: version: latest args: release --clean env: - GITHUB_TOKEN: ${{ secrets.RELEASE_TOKEN }} \ No newline at end of file + GITHUB_TOKEN: ${{ secrets.RELEASE_TOKEN }} + GITHUB_HOMEBREW_TOKEN: ${{ secrets.HOMEBREW_TOKEN }} diff --git a/.goreleaser.yaml b/.goreleaser.yaml index f911a1b9..3660e61f 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -10,6 +10,14 @@ builds: goos: - linux - darwin + goarch: + - amd64 + - arm64 + ldflags: + - -w -s + - -X main.version={{ .Version }} + - -X main.commit={{ .Commit }} + - -X main.date={{ .Date }} archives: - format: tar.gz @@ -25,12 +33,26 @@ checksum: name_template: 'checksums.txt' snapshot: name_template: "{{ incpatch .Version }}-next" + +brews: + - + name: milvus-backup + folder: Formula + homepage: "https://github.com/zilliztech/milvus-backup" + repository: + owner: zilliztech + name: homebrew-tap + token: "{{ .Env.GITHUB_HOMEBREW_TOKEN }}" + + changelog: sort: asc + use: github filters: exclude: - '^docs:' - '^test:' + - Merge pull request # The lines beneath this are called `modelines`. See `:help modeline` # Feel free to remove those if you don't want/use them. diff --git a/README.md b/README.md index 0d1e6178..6422f773 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,17 @@ Milvus-Backup is a tool that allows users to backup and restore Milvus data. This tool can be utilized either through the command line or an API server. -In order to use Milvus-Backup effectively, access to Milvus proxy and Minio cluster is required. Configuration settings related to this access can be edited in `configs/backup.yaml`. +The Milvus-backup process has negligible impact on the performance of Milvus. Milvus cluster is fully functional and can operate normally while backup and restoration are in progress. + +## Compatibility +| Milvus | Milvus-backup | +|:----------------:|:----------------:| +| v2.3.0 and above | v0.4.0 and above | +| v2.2.9 and above | v0.3.0 and above | +| v2.2.0 to v2.2.8 | v0.1.0 to v0.2.2 | + +## Config +In order to use Milvus-Backup, access to Milvus proxy and Minio cluster is required. Configuration settings related to this access can be edited in `configs/backup.yaml`. > **Note** > @@ -14,8 +24,6 @@ In order to use Milvus-Backup effectively, access to Milvus proxy and Minio clus > |bucketName|a-bucket|milvus-bucket| > |rootPath|files|file| -The Milvus-backup process has negligible impact on the performance of Milvus. Milvus cluster is fully functional and can operate normally while backup and restoration are in progress. - ## Development ### Build @@ -137,14 +145,14 @@ curl --location --request GET 'http://localhost:8080/api/v1/get_restore?id=test_ Milvus-backup establish CLI based on cobra. Use the following command to see the usage. ``` -milvus-backup is a backup tool for milvus. +milvus-backup is a backup&restore tool for milvus. Usage: milvus-backup [flags] milvus-backup [command] Available Commands: - completion Generate the autocompletion script for the specified shell + check check if the connects is right. create create subcommand create a backup. delete delete subcommand delete backup by name. 
get get subcommand get backup by name. @@ -154,7 +162,8 @@ Available Commands: server server subcommand start milvus-backup RESTAPI server. Flags: - -h, --help help for milvus-backup + --config string config YAML file of milvus (default "backup.yaml") + -h, --help help for milvus-backup Use "milvus-backup [command] --help" for more information about a command. ``` @@ -163,6 +172,26 @@ Use "milvus-backup [command] --help" for more information about a command. To try this demo, you should have a functional Milvus server installed and have pymilvus library installed. +Step 0: Check the connections + +First of all, we can use `check` command to check whether connections to milvus and storage is normal: + +``` +./milvus-backup check +``` + +normal output: + +```shell +Succeed to connect to milvus and storage. +Milvus version: v2.3 +Storage: +milvus-bucket: a-bucket +milvus-rootpath: files +backup-bucket: a-bucket +backup-rootpath: backup +``` + Step 1: Prepare the Data Create a collection in Milvus called `hello_milvus` and insert some data using the following command: diff --git a/cmd/check.go b/cmd/check.go new file mode 100644 index 00000000..6b31a0ae --- /dev/null +++ b/cmd/check.go @@ -0,0 +1,32 @@ +package cmd + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + "github.com/zilliztech/milvus-backup/core" + "github.com/zilliztech/milvus-backup/core/paramtable" +) + +var checkCmd = &cobra.Command{ + Use: "check", + Short: "check if the connects is right.", + + Run: func(cmd *cobra.Command, args []string) { + var params paramtable.BackupParams + fmt.Println("config:" + config) + params.GlobalInitWithYaml(config) + params.Init() + + context := context.Background() + backupContext := core.CreateBackupContext(context, params) + + resp := backupContext.Check(context) + fmt.Println(resp) + }, +} + +func init() { + rootCmd.AddCommand(checkCmd) +} diff --git a/cmd/create.go b/cmd/create.go index 9ec2a1c8..313c92c4 100644 --- a/cmd/create.go +++ b/cmd/create.go @@ -4,16 +4,22 @@ import ( "context" "fmt" "strings" + "time" + jsoniter "github.com/json-iterator/go" "github.com/spf13/cobra" "github.com/zilliztech/milvus-backup/core" "github.com/zilliztech/milvus-backup/core/paramtable" "github.com/zilliztech/milvus-backup/core/proto/backuppb" + "github.com/zilliztech/milvus-backup/core/utils" ) var ( backupName string collectionNames string + databases string + dbCollections string + force bool ) var createBackupCmd = &cobra.Command{ @@ -29,23 +35,46 @@ var createBackupCmd = &cobra.Command{ context := context.Background() backupContext := core.CreateBackupContext(context, params) + start := time.Now().Unix() var collectionNameArr []string if collectionNames == "" { collectionNameArr = []string{} } else { collectionNameArr = strings.Split(collectionNames, ",") } + + if dbCollections == "" && databases != "" { + dbCollectionDict := make(map[string][]string) + splits := strings.Split(databases, ",") + for _, db := range splits { + dbCollectionDict[db] = []string{} + } + completeDbCollections, err := jsoniter.MarshalToString(dbCollectionDict) + dbCollections = completeDbCollections + if err != nil { + fmt.Println("illegal databases input") + return + } + } resp := backupContext.CreateBackup(context, &backuppb.CreateBackupRequest{ BackupName: backupName, CollectionNames: collectionNameArr, + DbCollections: utils.WrapDBCollections(dbCollections), + Force: force, }) - fmt.Println(resp.GetCode(), "\n", resp.GetMsg()) + + fmt.Println(resp.GetMsg()) + duration := time.Now().Unix() - start + 
fmt.Println(fmt.Sprintf("duration:%d s", duration)) }, } func init() { createBackupCmd.Flags().StringVarP(&backupName, "name", "n", "", "backup name, if unset will generate a name automatically") - createBackupCmd.Flags().StringVarP(&collectionNames, "colls", "", "", "collectionNames to backup, use ',' to connect multiple collections") + createBackupCmd.Flags().StringVarP(&collectionNames, "colls", "c", "", "collectionNames to backup, use ',' to connect multiple collections") + createBackupCmd.Flags().StringVarP(&databases, "databases", "d", "", "databases to backup") + createBackupCmd.Flags().StringVarP(&dbCollections, "database_collections", "a", "", "databases and collections to backup, json format: {\"db1\":[\"c1\", \"c2\"],\"db2\":[]}") + createBackupCmd.Flags().BoolVarP(&force, "force", "f", false, "force backup skip flush, should make sure data has been stored into disk when using it") rootCmd.AddCommand(createBackupCmd) } diff --git a/cmd/delete.go b/cmd/delete.go index e38701ca..a0736876 100644 --- a/cmd/delete.go +++ b/cmd/delete.go @@ -30,7 +30,7 @@ var deleteBackupCmd = &cobra.Command{ BackupName: deleteBackName, }) - fmt.Println(resp.GetCode(), "\n", resp.GetMsg()) + fmt.Println(resp.GetMsg()) }, } diff --git a/cmd/get.go b/cmd/get.go index be83f74e..2828603c 100644 --- a/cmd/get.go +++ b/cmd/get.go @@ -2,6 +2,7 @@ package cmd import ( "context" + "encoding/json" "fmt" "github.com/spf13/cobra" @@ -12,6 +13,7 @@ import ( var ( getBackName string + getDetail bool ) var getBackupCmd = &cobra.Command{ @@ -28,15 +30,19 @@ var getBackupCmd = &cobra.Command{ backupContext := core.CreateBackupContext(context, params) resp := backupContext.GetBackup(context, &backuppb.GetBackupRequest{ - BackupName: getBackName, + BackupName: getBackName, + WithoutDetail: !getDetail, }) - fmt.Println(resp.GetCode(), "\n", resp.GetMsg()) + output, _ := json.MarshalIndent(resp.GetData(), "", " ") + fmt.Println(string(output)) + fmt.Println(resp.GetCode()) }, } func init() { getBackupCmd.Flags().StringVarP(&getBackName, "name", "n", "", "get backup with this name") + getBackupCmd.Flags().BoolVarP(&getDetail, "detail", "d", false, "get complete backup info") rootCmd.AddCommand(getBackupCmd) } diff --git a/cmd/restore.go b/cmd/restore.go index 72ae1ece..ae0cadcb 100644 --- a/cmd/restore.go +++ b/cmd/restore.go @@ -4,18 +4,28 @@ import ( "context" "fmt" "strings" + "time" + jsoniter "github.com/json-iterator/go" "github.com/spf13/cobra" "github.com/zilliztech/milvus-backup/core" "github.com/zilliztech/milvus-backup/core/paramtable" "github.com/zilliztech/milvus-backup/core/proto/backuppb" + "github.com/zilliztech/milvus-backup/core/utils" + "github.com/zilliztech/milvus-backup/internal/log" + + "go.uber.org/zap" ) var ( - restoreBackupName string - restoreCollectionNames string - renameSuffix string - renameCollectionNames string + restoreBackupName string + restoreCollectionNames string + renameSuffix string + renameCollectionNames string + restoreDatabases string + restoreDatabaseCollections string + restoreMetaOnly bool + restoreIndex bool ) var restoreBackupCmd = &cobra.Command{ @@ -24,38 +34,61 @@ var restoreBackupCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { var params paramtable.BackupParams - fmt.Println("config:" + config) params.GlobalInitWithYaml(config) params.Init() context := context.Background() backupContext := core.CreateBackupContext(context, params) - + log.Info("restore cmd input args", zap.Strings("args", args)) + start := time.Now().Unix() var collectionNameArr 
[]string - if collectionNames == "" { + if restoreCollectionNames == "" { collectionNameArr = []string{} } else { collectionNameArr = strings.Split(restoreCollectionNames, ",") } - var renameMap map[string]string - if renameCollectionNames == "" { - renameMap = map[string]string{} - } else { + renameMap := make(map[string]string, 0) + if renameCollectionNames != "" { + fmt.Println("rename: " + renameCollectionNames) renameArr := strings.Split(renameCollectionNames, ",") - if len(renameArr) != len(collectionNameArr) { - fmt.Errorf("collection_names and renames num dismatch, Forbid to restore") + for _, rename := range renameArr { + if strings.Contains(rename, ":") { + splits := strings.Split(rename, ":") + renameMap[splits[0]] = splits[1] + } else { + fmt.Println("illegal rename parameter") + return + } } } + if restoreDatabaseCollections == "" && restoreDatabases != "" { + dbCollectionDict := make(map[string][]string) + splits := strings.Split(restoreDatabases, ",") + for _, db := range splits { + dbCollectionDict[db] = []string{} + } + completeDbCollections, err := jsoniter.MarshalToString(dbCollectionDict) + restoreDatabaseCollections = completeDbCollections + if err != nil { + fmt.Println("illegal databases input") + return + } + } resp := backupContext.RestoreBackup(context, &backuppb.RestoreBackupRequest{ BackupName: restoreBackupName, CollectionNames: collectionNameArr, CollectionSuffix: renameSuffix, CollectionRenames: renameMap, + DbCollections: utils.WrapDBCollections(restoreDatabaseCollections), + MetaOnly: restoreMetaOnly, + RestoreIndex: restoreIndex, }) - fmt.Println(resp.GetCode(), "\n", resp.GetMsg()) + fmt.Println(resp.GetMsg()) + duration := time.Now().Unix() - start + fmt.Println(fmt.Sprintf("duration:%d s", duration)) }, } @@ -63,7 +96,12 @@ func init() { restoreBackupCmd.Flags().StringVarP(&restoreBackupName, "name", "n", "", "backup name to restore") restoreBackupCmd.Flags().StringVarP(&restoreCollectionNames, "collections", "c", "", "collectionNames to restore") restoreBackupCmd.Flags().StringVarP(&renameSuffix, "suffix", "s", "", "add a suffix to collection name to restore") - restoreBackupCmd.Flags().StringVarP(&renameCollectionNames, "rename", "r", "", "rename collections to new names") + restoreBackupCmd.Flags().StringVarP(&renameCollectionNames, "rename", "r", "", "rename collections to new names, format: db1.collection1:db2.collection1_new,db1.collection2:db2.collection2_new") + restoreBackupCmd.Flags().StringVarP(&restoreDatabases, "databases", "d", "", "databases to restore, if not set, restore all databases") + restoreBackupCmd.Flags().StringVarP(&restoreDatabaseCollections, "database_collections", "a", "", "databases and collections to restore, json format: {\"db1\":[\"c1\", \"c2\"],\"db2\":[]}") + + restoreBackupCmd.Flags().BoolVarP(&restoreMetaOnly, "meta_only", "", false, "if set true, will restore meta only") + restoreBackupCmd.Flags().BoolVarP(&restoreIndex, "restore_index", "", false, "if set true, will restore index") rootCmd.AddCommand(restoreBackupCmd) } diff --git a/cmd/root.go b/cmd/root.go index b53534cd..67d87952 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -2,6 +2,8 @@ package cmd import ( "errors" + "fmt" + "github.com/spf13/cobra" ) @@ -11,8 +13,8 @@ var ( var rootCmd = &cobra.Command{ Use: "milvus-backup", - Short: "milvus-backup is a backup tool for milvus.", - Long: `milvus-backup is a backup tool for milvus.`, + Short: "milvus-backup is a backup&restore tool for milvus.", + Long: `milvus-backup is a backup&restore tool for milvus.`, Run: 
func(cmd *cobra.Command, args []string) { Error(cmd, args, errors.New("unrecognized command")) }, @@ -20,6 +22,10 @@ var rootCmd = &cobra.Command{ func Execute() { rootCmd.PersistentFlags().StringVarP(&config, "config", "", "backup.yaml", "config YAML file of milvus") - + rootCmd.CompletionOptions.DisableDefaultCmd = true rootCmd.Execute() } + +func SetVersionInfo(version, commit, date string) { + rootCmd.Version = fmt.Sprintf("%s (Built on %s from Git SHA %s)", version, date, commit) +} diff --git a/cmd/version.go b/cmd/version.go deleted file mode 100644 index 94d1d5e1..00000000 --- a/cmd/version.go +++ /dev/null @@ -1,20 +0,0 @@ -package cmd - -import ( - "fmt" - "github.com/spf13/cobra" -) - -const Version = "1.0-beta" - -var versionCmd = &cobra.Command{ - Use: "version", - Short: "print the version of Milvus backup tool", - Run: func(cmd *cobra.Command, args []string) { - fmt.Println(fmt.Sprintf("Version: %s", Version)) - }, -} - -func init() { - rootCmd.AddCommand(versionCmd) -} diff --git a/configs/backup.yaml b/configs/backup.yaml index 16eccd2b..8c8767b6 100644 --- a/configs/backup.yaml +++ b/configs/backup.yaml @@ -1,7 +1,7 @@ # Configures the system log output. log: level: info # Only supports debug, info, warn, error, panic, or fatal. Default 'info'. - console: true + console: true # whether print log to console file: rootPath: "logs/backup.log" @@ -21,13 +21,14 @@ milvus: # Related configuration of minio, which is responsible for data persistence for Milvus. minio: + cloudProvider: "minio" # remote cloud storage provider: s3, gcp, aliyun, azure + address: localhost # Address of MinIO/S3 port: 9000 # Port of MinIO/S3 - accessKeyID: minioadmin # accessKeyID of MinIO/S3 + accessKeyID: minioadmin # accessKeyID of MinIO/S3 secretAccessKey: minioadmin # MinIO/S3 encryption string useSSL: false # Access to MinIO/S3 with SSL useIAM: false - cloudProvider: "aws" iamEndpoint: "" bucketName: "a-bucket" # Milvus Bucket name in MinIO/S3, make it the same as your milvus instance @@ -37,4 +38,16 @@ minio: backupRootPath: "backup" # Rootpath to store backup data. Backup data will store to backupBucketName/backupRootPath backup: - maxSegmentGroupSize: 2G \ No newline at end of file + maxSegmentGroupSize: 2G + parallelism: 2 # collection level parallelism to backup + copydata: + # thread pool to copy data for each collection backup, default 100. + # which means if you set backup.parallelism = 2 backup.copydata.parallelism = 100, there will be 200 copy executing at the same time. + # reduce it if blocks your storage's network bandwidth + parallelism: 100 + +restore: + # Collection level parallelism to restore + # Only change it > 1 when you have more than one datanode. + # Because the max parallelism of Milvus bulkinsert is equal to datanodes' number. 
+ parallelism: 2 \ No newline at end of file diff --git a/core/backup_context.go b/core/backup_context.go index a68ad79d..5404274f 100644 --- a/core/backup_context.go +++ b/core/backup_context.go @@ -8,11 +8,13 @@ import ( gomilvus "github.com/milvus-io/milvus-sdk-go/v2/client" "go.uber.org/zap" + "go.uber.org/zap/zapcore" "github.com/zilliztech/milvus-backup/core/paramtable" "github.com/zilliztech/milvus-backup/core/proto/backuppb" "github.com/zilliztech/milvus-backup/core/storage" "github.com/zilliztech/milvus-backup/core/utils" + "github.com/zilliztech/milvus-backup/internal/common" "github.com/zilliztech/milvus-backup/internal/log" ) @@ -36,10 +38,10 @@ type BackupContext struct { params paramtable.BackupParams // milvus client - milvusClient gomilvus.Client + milvusClient *MilvusClient // data storage client - storageClient storage.ChunkManager + storageClient *storage.ChunkManager milvusBucketName string backupBucketName string milvusRootPath string @@ -50,7 +52,7 @@ type BackupContext struct { restoreTasks map[string]*backuppb.RestoreBackupTask - //copyWorkerPool *common.WorkerPool + bulkinsertWorkerPool *common.WorkerPool } func CreateMilvusClient(ctx context.Context, params paramtable.BackupParams) (gomilvus.Client, error) { @@ -83,62 +85,25 @@ func CreateStorageClient(ctx context.Context, params paramtable.BackupParams) (s zap.String("address", minioEndPoint), zap.String("bucket", params.MinioCfg.BucketName), zap.String("backupBucket", params.MinioCfg.BackupBucketName)) - minioClient, err := storage.NewMinioChunkManager(ctx, - storage.Address(minioEndPoint), - storage.AccessKeyID(params.MinioCfg.AccessKeyID), - storage.SecretAccessKeyID(params.MinioCfg.SecretAccessKey), - storage.UseSSL(params.MinioCfg.UseSSL), - storage.BucketName(params.MinioCfg.BackupBucketName), - storage.RootPath(params.MinioCfg.RootPath), - storage.CloudProvider(params.MinioCfg.CloudProvider), - storage.UseIAM(params.MinioCfg.UseIAM), - storage.IAMEndpoint(params.MinioCfg.IAMEndpoint), - storage.CreateBucket(true), - ) + minioClient, err := storage.NewChunkManager(ctx, params) return minioClient, err } func (b *BackupContext) Start() error { - // start milvus go SDK client - milvusClient, err := CreateMilvusClient(b.ctx, b.params) - if err != nil { - log.Error("failed to initial milvus client", zap.Error(err)) - return err - } - b.milvusClient = milvusClient - - // start milvus storage client - minioClient, err := CreateStorageClient(b.ctx, b.params) - if err != nil { - log.Error("failed to initial storage client", zap.Error(err)) - return err - } - b.storageClient = minioClient - b.backupTasks = sync.Map{} b.backupNameIdDict = sync.Map{} b.restoreTasks = make(map[string]*backuppb.RestoreBackupTask) - - // init worker pool - //wp, err := common.NewWorkerPool(b.ctx, WORKER_NUM, RPS) - //if err != nil { - // log.Error("failed to initial copy data woker pool", zap.Error(err)) - // return err - //} - //b.copyWorkerPool = wp - //b.copyWorkerPool.Start() - b.started = true return nil } func (b *BackupContext) Close() error { b.started = false - err := b.milvusClient.Close() - //if b.copyWorkerPool != nil { - // b.copyWorkerPool.Done() - //} - return err + if b.milvusClient != nil { + err := b.getMilvusClient().Close() + return err + } + return nil } func CreateBackupContext(ctx context.Context, params paramtable.BackupParams) *BackupContext { @@ -152,7 +117,46 @@ func CreateBackupContext(ctx context.Context, params paramtable.BackupParams) *B } } -func (b BackupContext) GetBackup(ctx context.Context, request 
*backuppb.GetBackupRequest) *backuppb.BackupInfoResponse { +func (b *BackupContext) getMilvusClient() *MilvusClient { + if b.milvusClient == nil { + milvusClient, err := CreateMilvusClient(b.ctx, b.params) + if err != nil { + log.Error("failed to initial milvus client", zap.Error(err)) + panic(err) + } + b.milvusClient = &MilvusClient{ + client: milvusClient, + } + } + return b.milvusClient +} + +func (b *BackupContext) getStorageClient() storage.ChunkManager { + if b.storageClient == nil { + storageClient, err := CreateStorageClient(b.ctx, b.params) + if err != nil { + log.Error("failed to initial storage client", zap.Error(err)) + panic(err) + } + b.storageClient = &storageClient + } + return *b.storageClient +} + +func (b *BackupContext) getRestoreWorkerPool() *common.WorkerPool { + if b.bulkinsertWorkerPool == nil { + wp, err := common.NewWorkerPool(b.ctx, b.params.BackupCfg.RestoreParallelism, RPS) + if err != nil { + log.Error("failed to initial copy data woker pool", zap.Error(err)) + panic(err) + } + b.bulkinsertWorkerPool = wp + b.bulkinsertWorkerPool.Start() + } + return b.bulkinsertWorkerPool +} + +func (b *BackupContext) GetBackup(ctx context.Context, request *backuppb.GetBackupRequest) *backuppb.BackupInfoResponse { if request.GetRequestId() == "" { request.RequestId = utils.UUID() } @@ -172,26 +176,19 @@ func (b BackupContext) GetBackup(ctx context.Context, request *backuppb.GetBacku if err != nil { resp.Code = backuppb.ResponseCode_Fail resp.Msg = err.Error() - return resp } } if request.GetBackupId() == "" && request.GetBackupName() == "" { resp.Code = backuppb.ResponseCode_Parameter_Error resp.Msg = "empty backup name and backup id" - return resp - } - - if request.GetBackupId() != "" { + } else if request.GetBackupId() != "" { if value, ok := b.backupTasks.Load(request.GetBackupId()); ok { resp.Code = backuppb.ResponseCode_Success resp.Msg = "success" resp.Data = value.(*backuppb.BackupInfo) - return resp } - } - - if request.GetBackupName() != "" { + } else if request.GetBackupName() != "" { if id, ok := b.backupNameIdDict.Load(request.GetBackupName()); ok { resp.Code = backuppb.ResponseCode_Success resp.Msg = "success" @@ -199,7 +196,6 @@ func (b BackupContext) GetBackup(ctx context.Context, request *backuppb.GetBacku if ok { resp.Data = backup.(*backuppb.BackupInfo) } - return resp } else { var backupBucketName string var backupPath string @@ -218,7 +214,6 @@ func (b BackupContext) GetBackup(ctx context.Context, request *backuppb.GetBacku zap.Error(err)) resp.Code = backuppb.ResponseCode_Fail resp.Msg = err.Error() - return resp } resp.Data = backup @@ -232,17 +227,31 @@ func (b BackupContext) GetBackup(ctx context.Context, request *backuppb.GetBacku } } - log.Info("finish GetBackupRequest", - zap.String("requestId", request.GetRequestId()), - zap.String("backupName", request.GetBackupName()), - zap.String("backupId", request.GetBackupId()), - zap.String("bucketName", request.GetBucketName()), - zap.String("path", request.GetPath()), - zap.Any("resp", resp)) + if request.WithoutDetail { + resp = SimpleBackupResponse(resp) + } + + if log.GetLevel() == zapcore.DebugLevel { + log.Debug("finish GetBackupRequest", + zap.String("requestId", request.GetRequestId()), + zap.String("backupName", request.GetBackupName()), + zap.String("backupId", request.GetBackupId()), + zap.String("bucketName", request.GetBucketName()), + zap.String("path", request.GetPath()), + zap.Any("resp", resp)) + } else { + log.Info("finish GetBackupRequest", + zap.String("requestId", 
request.GetRequestId()), + zap.String("backupName", request.GetBackupName()), + zap.String("backupId", request.GetBackupId()), + zap.String("bucketName", request.GetBucketName()), + zap.String("path", request.GetPath())) + } + return resp } -func (b BackupContext) ListBackups(ctx context.Context, request *backuppb.ListBackupsRequest) *backuppb.ListBackupsResponse { +func (b *BackupContext) ListBackups(ctx context.Context, request *backuppb.ListBackupsRequest) *backuppb.ListBackupsResponse { if request.GetRequestId() == "" { request.RequestId = utils.UUID() } @@ -264,7 +273,7 @@ func (b BackupContext) ListBackups(ctx context.Context, request *backuppb.ListBa } // 1, trigger inner sync to get the newest backup list in the milvus cluster - backupPaths, _, err := b.storageClient.ListWithPrefix(ctx, b.backupBucketName, b.backupRootPath+SEPERATOR, false) + backupPaths, _, err := b.getStorageClient().ListWithPrefix(ctx, b.backupBucketName, b.backupRootPath+SEPERATOR, false) if err != nil { log.Error("Fail to list backup directory", zap.Error(err)) resp.Code = backuppb.ResponseCode_Fail @@ -319,7 +328,7 @@ func (b BackupContext) ListBackups(ctx context.Context, request *backuppb.ListBa return resp } -func (b BackupContext) DeleteBackup(ctx context.Context, request *backuppb.DeleteBackupRequest) *backuppb.DeleteBackupResponse { +func (b *BackupContext) DeleteBackup(ctx context.Context, request *backuppb.DeleteBackupRequest) *backuppb.DeleteBackupResponse { if request.GetRequestId() == "" { request.RequestId = utils.UUID() } @@ -366,7 +375,7 @@ func (b BackupContext) DeleteBackup(ctx context.Context, request *backuppb.Delet return resp } - err := b.storageClient.RemoveWithPrefix(ctx, b.backupBucketName, BackupDirPath(b.backupRootPath, request.GetBackupName())) + err := b.getStorageClient().RemoveWithPrefix(ctx, b.backupBucketName, BackupDirPath(b.backupRootPath, request.GetBackupName())) if err != nil { log.Error("Fail to delete backup", zap.String("backupName", request.GetBackupName()), zap.Error(err)) @@ -381,14 +390,14 @@ func (b BackupContext) DeleteBackup(ctx context.Context, request *backuppb.Delet return resp } -func (b BackupContext) readBackup(ctx context.Context, bucketName string, backupPath string) (*backuppb.BackupInfo, error) { +func (b *BackupContext) readBackup(ctx context.Context, bucketName string, backupPath string) (*backuppb.BackupInfo, error) { backupMetaDirPath := backupPath + SEPERATOR + META_PREFIX backupMetaPath := backupMetaDirPath + SEPERATOR + BACKUP_META_FILE collectionMetaPath := backupMetaDirPath + SEPERATOR + COLLECTION_META_FILE partitionMetaPath := backupMetaDirPath + SEPERATOR + PARTITION_META_FILE segmentMetaPath := backupMetaDirPath + SEPERATOR + SEGMENT_META_FILE - exist, err := b.storageClient.Exist(ctx, bucketName, backupMetaPath) + exist, err := b.getStorageClient().Exist(ctx, bucketName, backupMetaPath) if err != nil { log.Error("check backup meta file failed", zap.String("path", backupMetaPath), zap.Error(err)) return nil, err @@ -398,22 +407,22 @@ func (b BackupContext) readBackup(ctx context.Context, bucketName string, backup return nil, err } - backupMetaBytes, err := b.storageClient.Read(ctx, bucketName, backupMetaPath) + backupMetaBytes, err := b.getStorageClient().Read(ctx, bucketName, backupMetaPath) if err != nil { log.Error("Read backup meta failed", zap.String("path", backupMetaPath), zap.Error(err)) return nil, err } - collectionBackupMetaBytes, err := b.storageClient.Read(ctx, bucketName, collectionMetaPath) + collectionBackupMetaBytes, err := 
b.getStorageClient().Read(ctx, bucketName, collectionMetaPath) if err != nil { log.Error("Read collection meta failed", zap.String("path", collectionMetaPath), zap.Error(err)) return nil, err } - partitionBackupMetaBytes, err := b.storageClient.Read(ctx, bucketName, partitionMetaPath) + partitionBackupMetaBytes, err := b.getStorageClient().Read(ctx, bucketName, partitionMetaPath) if err != nil { log.Error("Read partition meta failed", zap.String("path", partitionMetaPath), zap.Error(err)) return nil, err } - segmentBackupMetaBytes, err := b.storageClient.Read(ctx, bucketName, segmentMetaPath) + segmentBackupMetaBytes, err := b.getStorageClient().Read(ctx, bucketName, segmentMetaPath) if err != nil { log.Error("Read segment meta failed", zap.String("path", segmentMetaPath), zap.Error(err)) return nil, err @@ -473,3 +482,53 @@ func (b *BackupContext) GetRestore(ctx context.Context, request *backuppb.GetRes return resp } } + +func (b *BackupContext) Check(ctx context.Context) string { + version, err := b.getMilvusClient().GetVersion(ctx) + if err != nil { + return "Failed to connect to milvus " + err.Error() + } + + info := fmt.Sprintf( + "Milvus version: %s\n"+ + "Storage:\n"+ + "milvus-bucket: %s\n"+ + "milvus-rootpath: %s\n"+ + "backup-bucket: %s\n"+ + "backup-rootpath: %s\n", + version, b.milvusBucketName, b.milvusRootPath, b.backupBucketName, b.backupRootPath) + + paths, _, err := b.getStorageClient().ListWithPrefix(ctx, b.milvusBucketName, b.milvusRootPath+SEPERATOR, false) + if err != nil { + return "Failed to connect to storage milvus path\n" + info + err.Error() + } + + if len(paths) == 0 { + return "Milvus storage root path is empty, please verify config if your cluster has is not empty\n" + info + } + + paths, _, err = b.getStorageClient().ListWithPrefix(ctx, b.backupBucketName, b.backupRootPath+SEPERATOR, false) + if err != nil { + return "Failed to connect to storage backup path " + info + err.Error() + } + + CHECK_PATH := ".milvus_backup_check" + + err = b.getStorageClient().Write(ctx, b.milvusBucketName, b.milvusRootPath+SEPERATOR+CHECK_PATH, []byte{1}) + if err != nil { + return "Failed to connect to storage milvus path\n" + info + err.Error() + } + defer func() { + b.getStorageClient().Remove(ctx, b.milvusBucketName, b.milvusRootPath+SEPERATOR+CHECK_PATH) + }() + + err = b.getStorageClient().Copy(ctx, b.milvusBucketName, b.backupBucketName, b.milvusRootPath+SEPERATOR+CHECK_PATH, b.backupRootPath+SEPERATOR+CHECK_PATH) + if err != nil { + return "Failed to copy file from milvus storage to backup storage\n" + info + err.Error() + } + defer func() { + b.getStorageClient().Remove(ctx, b.backupBucketName, b.backupRootPath+SEPERATOR+CHECK_PATH) + }() + + return "Succeed to connect to milvus and storage.\n" + info +} diff --git a/core/backup_context_test.go b/core/backup_context_test.go index 88044275..7ec9845f 100644 --- a/core/backup_context_test.go +++ b/core/backup_context_test.go @@ -3,13 +3,15 @@ package core import ( "context" "fmt" + "math/rand" + "testing" + "github.com/stretchr/testify/assert" "github.com/zilliztech/milvus-backup/core/paramtable" "github.com/zilliztech/milvus-backup/core/proto/backuppb" + "github.com/zilliztech/milvus-backup/core/utils" "github.com/zilliztech/milvus-backup/internal/log" "go.uber.org/zap" - "math/rand" - "testing" ) func TestCreateBackup(t *testing.T) { @@ -19,8 +21,9 @@ func TestCreateBackup(t *testing.T) { backup := CreateBackupContext(context, params) req := &backuppb.CreateBackupRequest{ - BackupName: "test_21", - CollectionNames: 
[]string{"hello_milvus", "hello_milvus2"}, + BackupName: "test_21", + //CollectionNames: []string{"hello_milvus", "hello_milvus2"}, + DbCollections: utils.WrapDBCollections(""), } backup.CreateBackup(context, req) } @@ -35,7 +38,7 @@ func TestListBackups(t *testing.T) { assert.Equal(t, backupLists.GetCode(), backuppb.ResponseCode_Success) backupListsWithCollection := backupContext.ListBackups(context, &backuppb.ListBackupsRequest{ - CollectionName: "hello_milvus", + //CollectionName: "hello_milvus", }) for _, backup := range backupListsWithCollection.GetData() { @@ -59,7 +62,7 @@ func TestGetBackup(t *testing.T) { backupContext := CreateBackupContext(context, params) backup := backupContext.GetBackup(context, &backuppb.GetBackupRequest{ - BackupName: "test_backup", + BackupName: "mybackup", }) assert.Equal(t, backup.GetCode(), backuppb.ResponseCode_Success) } @@ -82,6 +85,26 @@ func TestDeleteBackup(t *testing.T) { } +func TestCreateBackupWithNoName(t *testing.T) { + var params paramtable.BackupParams + params.Init() + context := context.Background() + backup := CreateBackupContext(context, params) + + randBackupName := "" + + req := &backuppb.CreateBackupRequest{ + BackupName: randBackupName, + } + resp := backup.CreateBackup(context, req) + assert.Equal(t, backuppb.ResponseCode_Success, resp.GetCode()) + + // clean + backup.DeleteBackup(context, &backuppb.DeleteBackupRequest{ + BackupName: randBackupName, + }) +} + func TestCreateBackupWithUnexistCollection(t *testing.T) { var params paramtable.BackupParams params.Init() @@ -195,7 +218,7 @@ func TestGetBackupFaultBackup(t *testing.T) { resp := backupContext.CreateBackup(context, req) assert.Equal(t, backuppb.ResponseCode_Success, resp.GetCode()) - backupContext.storageClient.RemoveWithPrefix(context, params.MinioCfg.BackupBucketName, BackupMetaPath(params.MinioCfg.BackupRootPath, resp.GetData().GetName())) + backupContext.getStorageClient().RemoveWithPrefix(context, params.MinioCfg.BackupBucketName, BackupMetaPath(params.MinioCfg.BackupRootPath, resp.GetData().GetName())) backup := backupContext.GetBackup(context, &backuppb.GetBackupRequest{ BackupName: randBackupName, @@ -222,6 +245,22 @@ func TestGetBackupUnexistBackupName(t *testing.T) { } func TestRestoreBackup(t *testing.T) { + var params paramtable.BackupParams + params.Init() + context := context.Background() + backup := CreateBackupContext(context, params) + backup.Start() + backupName := "demo" + //fmt.Sprintf("test_%d", rand.Int()) + + restoreResp := backup.RestoreBackup(context, &backuppb.RestoreBackupRequest{ + BackupName: backupName, + DbCollections: utils.WrapDBCollections("{\"default\":[]}"), + }) + log.Info("restore backup", zap.Any("resp", restoreResp)) +} + +func TestCreateAndRestoreBackup(t *testing.T) { var params paramtable.BackupParams params.Init() context := context.Background() diff --git a/core/backup_impl_create_backup.go b/core/backup_impl_create_backup.go index 377e9767..82ed1249 100644 --- a/core/backup_impl_create_backup.go +++ b/core/backup_impl_create_backup.go @@ -8,6 +8,7 @@ import ( "strings" "time" + jsoniter "github.com/json-iterator/go" "github.com/milvus-io/milvus-sdk-go/v2/entity" "go.uber.org/zap" @@ -17,7 +18,7 @@ import ( "github.com/zilliztech/milvus-backup/internal/log" ) -func (b BackupContext) CreateBackup(ctx context.Context, request *backuppb.CreateBackupRequest) *backuppb.BackupInfoResponse { +func (b *BackupContext) CreateBackup(ctx context.Context, request *backuppb.CreateBackupRequest) *backuppb.BackupInfoResponse { if 
request.GetRequestId() == "" { request.RequestId = utils.UUID() } @@ -25,6 +26,7 @@ func (b BackupContext) CreateBackup(ctx context.Context, request *backuppb.Creat zap.String("requestId", request.GetRequestId()), zap.String("backupName", request.GetBackupName()), zap.Strings("collections", request.GetCollectionNames()), + zap.String("databaseCollections", utils.GetCreateDBCollections(request)), zap.Bool("async", request.GetAsync())) resp := &backuppb.BackupInfoResponse{ @@ -41,8 +43,11 @@ func (b BackupContext) CreateBackup(ctx context.Context, request *backuppb.Creat } // backup name validate + if request.GetBackupName() == "" { + request.BackupName = "backup_" + fmt.Sprint(time.Now().UTC().Format("2006_01_02_15_04_05_")) + fmt.Sprint(time.Now().Nanosecond()) + } if request.GetBackupName() != "" { - exist, err := b.storageClient.Exist(b.ctx, b.backupBucketName, b.backupRootPath+SEPERATOR+request.GetBackupName()) + exist, err := b.getStorageClient().Exist(b.ctx, b.backupBucketName, b.backupRootPath+SEPERATOR+request.GetBackupName()) if err != nil { errMsg := fmt.Sprintf("fail to check whether exist backup with name: %s", request.GetBackupName()) log.Error(errMsg, zap.Error(err)) @@ -66,14 +71,9 @@ func (b BackupContext) CreateBackup(ctx context.Context, request *backuppb.Creat return resp } - var name string - if request.GetBackupName() == "" { - name = "backup_" + fmt.Sprint(time.Now().Unix()) - } else { - name = request.BackupName - } + var name string = request.BackupName - milvusVersion, err := b.milvusClient.GetVersion(b.ctx) + milvusVersion, err := b.getMilvusClient().GetVersion(b.ctx) if err != nil { log.Error("fail to get milvus version", zap.Error(err)) resp.Code = backuppb.ResponseCode_Fail @@ -92,7 +92,7 @@ func (b BackupContext) CreateBackup(ctx context.Context, request *backuppb.Creat b.backupNameIdDict.Store(name, request.GetRequestId()) if request.Async { - go b.executeCreateBackup(ctx, request, backup) + go b.executeCreateBackupV2(ctx, request, backup) asyncResp := &backuppb.BackupInfoResponse{ RequestId: request.GetRequestId(), Code: backuppb.ResponseCode_Success, @@ -101,7 +101,7 @@ func (b BackupContext) CreateBackup(ctx context.Context, request *backuppb.Creat } return asyncResp } else { - task, err := b.executeCreateBackup(ctx, request, backup) + task, err := b.executeCreateBackupV2(ctx, request, backup) resp.Data = task if err != nil { resp.Code = backuppb.ResponseCode_Fail @@ -114,7 +114,7 @@ func (b BackupContext) CreateBackup(ctx context.Context, request *backuppb.Creat } } -func (b BackupContext) refreshBackupMeta(id string, backupInfo *backuppb.BackupInfo, leveledBackupInfo *LeveledBackupInfo) (*backuppb.BackupInfo, error) { +func (b *BackupContext) refreshBackupMeta(id string, backupInfo *backuppb.BackupInfo, leveledBackupInfo *LeveledBackupInfo) (*backuppb.BackupInfo, error) { log.Debug("call refreshBackupMeta", zap.String("id", id)) backup, err := levelToTree(leveledBackupInfo) if err != nil { @@ -125,31 +125,71 @@ func (b BackupContext) refreshBackupMeta(id string, backupInfo *backuppb.BackupI return backup, nil } -type collection struct { +func (b *BackupContext) refreshBackupCache(backupInfo *backuppb.BackupInfo) { + log.Debug("refreshBackupCache", zap.String("id", backupInfo.GetId())) + b.backupTasks.Store(backupInfo.GetId(), backupInfo) +} + +type collectionStruct struct { db string collectionName string } -func (b BackupContext) parseBackupCollections(request *backuppb.CreateBackupRequest) ([]collection, error) { +// parse collections to backup +// For 
backward compatibility: +// 1,parse dbCollections first, +// 2,if dbCollections not set, use collectionNames +func (b *BackupContext) parseBackupCollections(request *backuppb.CreateBackupRequest) ([]collectionStruct, error) { log.Debug("Request collection names", zap.Strings("request_collection_names", request.GetCollectionNames()), + zap.String("request_db_collections", utils.GetCreateDBCollections(request)), zap.Int("length", len(request.GetCollectionNames()))) - var toBackupCollections []collection + var toBackupCollections []collectionStruct + + dbCollectionsStr := utils.GetCreateDBCollections(request) + // first priority: dbCollections + if dbCollectionsStr != "" { + var dbCollections DbCollections + err := jsoniter.UnmarshalFromString(dbCollectionsStr, &dbCollections) + if err != nil { + log.Error("fail in unmarshal dbCollections in CreateBackupRequest", zap.String("dbCollections", dbCollectionsStr), zap.Error(err)) + return nil, err + } + for db, collections := range dbCollections { + if len(collections) == 0 { + collections, err := b.getMilvusClient().ListCollections(b.ctx, db) + if err != nil { + log.Error("fail in ListCollections", zap.Error(err)) + return nil, err + } + for _, coll := range collections { + log.Debug("Add collection to toBackupCollections", zap.String("db", db), zap.String("collection", coll.Name)) + toBackupCollections = append(toBackupCollections, collectionStruct{db, coll.Name}) + } + } else { + for _, coll := range collections { + toBackupCollections = append(toBackupCollections, collectionStruct{db, coll}) + } + } + } + log.Debug("Parsed backup collections from request.db_collections", zap.Int("length", len(toBackupCollections))) + return toBackupCollections, nil + } + if request.GetCollectionNames() == nil || len(request.GetCollectionNames()) == 0 { - dbs, err := b.milvusClient.ListDatabases(b.ctx) + dbs, err := b.getMilvusClient().ListDatabases(b.ctx) if err != nil { log.Error("fail in ListDatabases", zap.Error(err)) return nil, err } for _, db := range dbs { - b.milvusClient.UsingDatabase(b.ctx, db.Name) - collections, err := b.milvusClient.ListCollections(b.ctx) + collections, err := b.getMilvusClient().ListCollections(b.ctx, db.Name) if err != nil { log.Error("fail in ListCollections", zap.Error(err)) return nil, err } for _, coll := range collections { - toBackupCollections = append(toBackupCollections, collection{db.Name, coll.Name}) + toBackupCollections = append(toBackupCollections, collectionStruct{db.Name, coll.Name}) } } log.Debug(fmt.Sprintf("List %v collections", len(toBackupCollections))) @@ -161,9 +201,8 @@ func (b BackupContext) parseBackupCollections(request *backuppb.CreateBackupRequ dbName = splits[0] collectionName = splits[1] } - b.milvusClient.UsingDatabase(b.ctx, dbName) - exist, err := b.milvusClient.HasCollection(b.ctx, collectionName) + exist, err := b.getMilvusClient().HasCollection(b.ctx, dbName, collectionName) if err != nil { log.Error("fail in HasCollection", zap.Error(err)) return nil, err @@ -173,14 +212,399 @@ func (b BackupContext) parseBackupCollections(request *backuppb.CreateBackupRequ log.Error(errMsg) return nil, errors.New(errMsg) } - toBackupCollections = append(toBackupCollections, collection{dbName, collectionName}) + toBackupCollections = append(toBackupCollections, collectionStruct{dbName, collectionName}) } } return toBackupCollections, nil } -func (b BackupContext) executeCreateBackup(ctx context.Context, request *backuppb.CreateBackupRequest, backupInfo *backuppb.BackupInfo) (*backuppb.BackupInfo, error) { 
+func (b *BackupContext) backupCollection(ctx context.Context, backupInfo *backuppb.BackupInfo, collection collectionStruct, force bool) error { + log.Info("start backup collection", zap.String("db", collection.db), zap.String("collection", collection.collectionName)) + // list collection result is not complete + completeCollection, err := b.getMilvusClient().DescribeCollection(b.ctx, collection.db, collection.collectionName) + if err != nil { + log.Error("fail in DescribeCollection", zap.Error(err)) + return err + } + fields := make([]*backuppb.FieldSchema, 0) + for _, field := range completeCollection.Schema.Fields { + fields = append(fields, &backuppb.FieldSchema{ + FieldID: field.ID, + Name: field.Name, + IsPrimaryKey: field.PrimaryKey, + Description: field.Description, + AutoID: field.AutoID, + DataType: backuppb.DataType(field.DataType), + TypeParams: utils.MapToKVPair(field.TypeParams), + IndexParams: utils.MapToKVPair(field.IndexParams), + IsDynamic: field.IsDynamic, + IsPartitionKey: field.IsPartitionKey, + }) + } + schema := &backuppb.CollectionSchema{ + Name: completeCollection.Schema.CollectionName, + Description: completeCollection.Schema.Description, + AutoID: completeCollection.Schema.AutoID, + Fields: fields, + EnableDynamicField: completeCollection.Schema.EnableDynamicField, + } + + indexInfos := make([]*backuppb.IndexInfo, 0) + indexDict := make(map[string]*backuppb.IndexInfo, 0) + log.Info("try to get index", + zap.String("collection_name", completeCollection.Name)) + for _, field := range completeCollection.Schema.Fields { + //if field.DataType != entity.FieldTypeBinaryVector && field.DataType != entity.FieldTypeFloatVector { + // continue + //} + fieldIndex, err := b.getMilvusClient().DescribeIndex(b.ctx, collection.db, completeCollection.Name, field.Name) + if err != nil { + if strings.Contains(err.Error(), "index not found") || + strings.HasPrefix(err.Error(), "index doesn't exist") { + // todo + log.Info("field has no index", + zap.String("collection_name", completeCollection.Name), + zap.String("field_name", field.Name)) + continue + } else { + log.Error("fail in DescribeIndex", zap.Error(err)) + return err + } + } + log.Info("field index", + zap.String("collection_name", completeCollection.Name), + zap.String("field_name", field.Name), + zap.Any("index info", fieldIndex)) + for _, index := range fieldIndex { + if _, ok := indexDict[index.Name()]; ok { + continue + } else { + indexInfo := &backuppb.IndexInfo{ + FieldName: index.FieldName(), + IndexName: index.Name(), + IndexType: string(index.IndexType()), + Params: index.Params(), + } + indexInfos = append(indexInfos, indexInfo) + indexDict[index.Name()] = indexInfo + } + } + } + + collectionBackup := &backuppb.CollectionBackupInfo{ + Id: utils.UUID(), + StateCode: backuppb.BackupTaskStateCode_BACKUP_INITIAL, + StartTime: time.Now().Unix(), + CollectionId: completeCollection.ID, + DbName: collection.db, // todo currently db_name is not used in many places + CollectionName: completeCollection.Name, + Schema: schema, + ShardsNum: completeCollection.ShardNum, + ConsistencyLevel: backuppb.ConsistencyLevel(completeCollection.ConsistencyLevel), + HasIndex: len(indexInfos) > 0, + IndexInfos: indexInfos, + } + backupInfo.CollectionBackups = append(backupInfo.CollectionBackups, collectionBackup) + + b.refreshBackupCache(backupInfo) + partitionBackupInfos := make([]*backuppb.PartitionBackupInfo, 0) + partitions, err := b.getMilvusClient().ShowPartitions(b.ctx, collectionBackup.GetDbName(), 
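Because DescribeIndex is queried per field, the loop above keeps a name-keyed map so the same index is recorded only once. A simplified standalone version of that de-duplication is sketched here with trimmed stand-in types.

package main

import "fmt"

type indexInfo struct {
	FieldName, IndexName, IndexType string
}

// dedupIndexes keeps the first occurrence of each index name, mirroring the
// indexDict bookkeeping in the hunk above.
func dedupIndexes(found []indexInfo) []indexInfo {
	seen := make(map[string]struct{})
	out := make([]indexInfo, 0, len(found))
	for _, idx := range found {
		if _, ok := seen[idx.IndexName]; ok {
			continue
		}
		seen[idx.IndexName] = struct{}{}
		out = append(out, idx)
	}
	return out
}

func main() {
	fmt.Println(dedupIndexes([]indexInfo{
		{"vec", "idx_vec", "HNSW"},
		{"vec", "idx_vec", "HNSW"}, // duplicate report from a second field pass
	}))
}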
collectionBackup.GetCollectionName()) + if err != nil { + log.Error("fail to ShowPartitions", zap.Error(err)) + return err + } + + // use GetLoadingProgress currently, GetLoadState is a new interface @20230104 milvus pr#21515 + collectionLoadProgress, err := b.getMilvusClient().GetLoadingProgress(ctx, collectionBackup.GetDbName(), collectionBackup.GetCollectionName(), []string{}) + if err != nil { + log.Error("fail to GetLoadingProgress of collection", zap.Error(err)) + return err + } + + var collectionLoadState string + partitionLoadStates := make(map[string]string, 0) + if collectionLoadProgress == 0 { + collectionLoadState = LoadState_NotLoad + for _, partition := range partitions { + partitionLoadStates[partition.Name] = LoadState_NotLoad + } + } else if collectionLoadProgress == 100 { + collectionLoadState = LoadState_Loaded + for _, partition := range partitions { + partitionLoadStates[partition.Name] = LoadState_Loaded + } + } else { + collectionLoadState = LoadState_Loading + for _, partition := range partitions { + loadProgress, err := b.getMilvusClient().GetLoadingProgress(ctx, collectionBackup.GetDbName(), collectionBackup.GetCollectionName(), []string{partition.Name}) + if err != nil { + log.Error("fail to GetLoadingProgress of partition", zap.Error(err)) + return err + } + if loadProgress == 0 { + partitionLoadStates[partition.Name] = LoadState_NotLoad + } else if loadProgress == 100 { + partitionLoadStates[partition.Name] = LoadState_Loaded + } else { + partitionLoadStates[partition.Name] = LoadState_Loading + } + } + } + + // fill segments + filledSegments := make([]*entity.Segment, 0) + if !force { + // Flush + segmentEntitiesBeforeFlush, err := b.getMilvusClient().GetPersistentSegmentInfo(ctx, collectionBackup.GetDbName(), collectionBackup.GetCollectionName()) + if err != nil { + return err + } + log.Info("GetPersistentSegmentInfo before flush from milvus", + zap.String("collectionName", collectionBackup.GetCollectionName()), + zap.Int("segmentNumBeforeFlush", len(segmentEntitiesBeforeFlush))) + newSealedSegmentIDs, flushedSegmentIDs, timeOfSeal, err := b.getMilvusClient().FlushV2(ctx, collectionBackup.GetDbName(), collectionBackup.GetCollectionName(), false) + if err != nil { + log.Error(fmt.Sprintf("fail to flush the collection: %s", collectionBackup.GetCollectionName())) + return err + } + log.Info("flush segments", + zap.String("collectionName", collectionBackup.GetCollectionName()), + zap.Int64s("newSealedSegmentIDs", newSealedSegmentIDs), + zap.Int64s("flushedSegmentIDs", flushedSegmentIDs), + zap.Int64("timeOfSeal", timeOfSeal)) + collectionBackup.BackupTimestamp = utils.ComposeTS(timeOfSeal, 0) + collectionBackup.BackupPhysicalTimestamp = uint64(timeOfSeal) + + flushSegmentIDs := append(newSealedSegmentIDs, flushedSegmentIDs...) 
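The load state recorded for a collection and its partitions is derived purely from GetLoadingProgress percentages: 0 means not loaded, 100 means loaded, anything in between means loading. A compact sketch of that mapping, reusing the constant names referenced in the patch, is shown here.

package main

import "fmt"

const (
	LoadState_NotLoad = "NotLoad"
	LoadState_Loading = "Loading"
	LoadState_Loaded  = "Loaded"
)

// loadStateFromProgress maps a loading-progress percentage to a load state.
func loadStateFromProgress(progress int64) string {
	switch {
	case progress == 0:
		return LoadState_NotLoad
	case progress == 100:
		return LoadState_Loaded
	default:
		return LoadState_Loading
	}
}

func main() {
	for _, p := range []int64{0, 42, 100} {
		fmt.Printf("progress %d%% -> %s\n", p, loadStateFromProgress(p))
	}
}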
+ segmentEntitiesAfterFlush, err := b.getMilvusClient().GetPersistentSegmentInfo(ctx, collectionBackup.GetDbName(), collectionBackup.GetCollectionName()) + if err != nil { + return err + } + log.Info("GetPersistentSegmentInfo after flush from milvus", + zap.String("collectionName", collectionBackup.GetCollectionName()), + zap.Int("segmentNumBeforeFlush", len(segmentEntitiesBeforeFlush)), + zap.Int("segmentNumAfterFlush", len(segmentEntitiesAfterFlush))) + segmentDict := utils.ArrayToMap(flushSegmentIDs) + for _, seg := range segmentEntitiesAfterFlush { + sid := seg.ID + if _, ok := segmentDict[sid]; ok { + delete(segmentDict, sid) + filledSegments = append(filledSegments, seg) + } else { + log.Debug("this may be new segments after flush, skip it", zap.Int64("id", sid)) + } + } + for _, seg := range segmentEntitiesBeforeFlush { + sid := seg.ID + if _, ok := segmentDict[sid]; ok { + delete(segmentDict, sid) + filledSegments = append(filledSegments, seg) + } else { + log.Debug("this may be old segments before flush, skip it", zap.Int64("id", sid)) + } + } + if len(segmentDict) > 0 { + // very rare situation, segments return in flush doesn't exist in either segmentEntitiesBeforeFlush and segmentEntitiesAfterFlush + errorMsg := "Segment return in Flush not exist in GetPersistentSegmentInfo. segment ids: " + fmt.Sprint(utils.MapKeyArray(segmentDict)) + log.Warn(errorMsg) + } + } else { + // Flush + segmentEntitiesBeforeFlush, err := b.getMilvusClient().GetPersistentSegmentInfo(ctx, collectionBackup.GetDbName(), collectionBackup.GetCollectionName()) + if err != nil { + return err + } + log.Info("GetPersistentSegmentInfo from milvus", + zap.String("collectionName", collectionBackup.GetCollectionName()), + zap.Int("segmentNum", len(segmentEntitiesBeforeFlush))) + for _, seg := range segmentEntitiesBeforeFlush { + filledSegments = append(filledSegments, seg) + } + } + + if err != nil { + collectionBackup.StateCode = backuppb.BackupTaskStateCode_BACKUP_FAIL + collectionBackup.ErrorMessage = err.Error() + return err + } + log.Info("Finished fill segment", + zap.String("collectionName", collectionBackup.GetCollectionName())) + + segmentBackupInfos := make([]*backuppb.SegmentBackupInfo, 0) + partSegInfoMap := make(map[int64][]*backuppb.SegmentBackupInfo) + + segmentLevelBackupInfos := make([]*backuppb.SegmentBackupInfo, 0) + + for _, segment := range filledSegments { + segmentInfo, err := b.readSegmentInfo(ctx, segment.CollectionID, segment.ParititionID, segment.ID, segment.NumRows) + if err != nil { + return err + } + if len(segmentInfo.Binlogs) == 0 { + log.Warn("this segment has no insert binlog", zap.Int64("id", segment.ID)) + } + partSegInfoMap[segment.ParititionID] = append(partSegInfoMap[segment.ParititionID], segmentInfo) + segmentBackupInfos = append(segmentBackupInfos, segmentInfo) + segmentLevelBackupInfos = append(segmentLevelBackupInfos, segmentInfo) + } + log.Info("readSegmentInfo from storage", + zap.String("collectionName", collectionBackup.GetCollectionName()), + zap.Int("segmentNum", len(filledSegments))) + + for _, partition := range partitions { + partitionSegments := partSegInfoMap[partition.ID] + var size int64 = 0 + for _, seg := range partitionSegments { + size += seg.GetSize() + } + partitionBackupInfo := &backuppb.PartitionBackupInfo{ + PartitionId: partition.ID, + PartitionName: partition.Name, + CollectionId: collectionBackup.GetCollectionId(), + SegmentBackups: partSegInfoMap[partition.ID], + Size: size, + LoadState: partitionLoadStates[partition.Name], + } + 
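The non-force path above reconciles the segment IDs reported by FlushV2 against GetPersistentSegmentInfo taken after and then before the flush, and only warns about IDs it cannot match. A simplified, self-contained version of that set reconciliation follows; the types and helper name are ours.

package main

import "fmt"

// reconcile matches flushed segment IDs against the segments listed after and
// before the flush; anything left over is surfaced separately, as the patch
// only logs it as a warning.
func reconcile(flushedIDs, after, before []int64) (matched, leftover []int64) {
	want := make(map[int64]struct{}, len(flushedIDs))
	for _, id := range flushedIDs {
		want[id] = struct{}{}
	}
	take := func(ids []int64) {
		for _, id := range ids {
			if _, ok := want[id]; ok {
				delete(want, id)
				matched = append(matched, id)
			}
		}
	}
	take(after)  // segments visible after the flush
	take(before) // plus any that were already sealed before it
	for id := range want {
		leftover = append(leftover, id) // rare: reported by Flush but never listed
	}
	return matched, leftover
}

func main() {
	m, l := reconcile([]int64{1, 2, 3}, []int64{1, 2, 9}, []int64{3})
	fmt.Println(m, l) // [1 2 3] []
}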
partitionBackupInfos = append(partitionBackupInfos, partitionBackupInfo) + //partitionLevelBackupInfos = append(partitionLevelBackupInfos, partitionBackupInfo) + } + + //leveledBackupInfo.partitionLevel = &backuppb.PartitionLevelBackupInfo{ + // Infos: partitionLevelBackupInfos, + //} + collectionBackup.PartitionBackups = partitionBackupInfos + collectionBackup.LoadState = collectionLoadState + b.refreshBackupCache(backupInfo) + log.Info("finish build partition info", + zap.String("collectionName", collectionBackup.GetCollectionName()), + zap.Int("partitionNum", len(partitionBackupInfos))) + + log.Info("Begin copy data", + zap.String("collectionName", collectionBackup.GetCollectionName()), + zap.Int("segmentNum", len(segmentBackupInfos))) + + var collectionBackupSize int64 = 0 + for _, part := range partitionBackupInfos { + collectionBackupSize += part.GetSize() + if part.GetSize() > b.params.BackupCfg.MaxSegmentGroupSize { + log.Info("partition size is larger than MaxSegmentGroupSize, will separate segments into groups in backup files", + zap.Int64("collectionId", part.GetCollectionId()), + zap.Int64("partitionId", part.GetPartitionId()), + zap.Int64("partitionSize", part.GetSize()), + zap.Int64("MaxSegmentGroupSize", b.params.BackupCfg.MaxSegmentGroupSize)) + segments := partSegInfoMap[part.GetPartitionId()] + var bufferSize int64 = 0 + // 0 is illegal value, start from 1 + var segGroupID int64 = 1 + for _, seg := range segments { + if seg.Size > b.params.BackupCfg.MaxSegmentGroupSize && bufferSize == 0 { + seg.GroupId = segGroupID + segGroupID = segGroupID + 1 + } else if bufferSize+seg.Size > b.params.BackupCfg.MaxSegmentGroupSize { + segGroupID = segGroupID + 1 + seg.GroupId = segGroupID + bufferSize = 0 + bufferSize = bufferSize + seg.Size + } else { + seg.GroupId = segGroupID + bufferSize = bufferSize + seg.Size + } + } + } else { + log.Info("partition size is smaller than MaxSegmentGroupSize, won't separate segments into groups in backup files", + zap.Int64("collectionId", part.GetCollectionId()), + zap.Int64("partitionId", part.GetPartitionId()), + zap.Int64("partitionSize", part.GetSize()), + zap.Int64("MaxSegmentGroupSize", b.params.BackupCfg.MaxSegmentGroupSize)) + } + } + + err = b.copySegments(ctx, segmentBackupInfos, BackupBinlogDirPath(b.backupRootPath, backupInfo.GetName())) + b.refreshBackupCache(backupInfo) + + collectionBackup.Size = collectionBackupSize + collectionBackup.EndTime = time.Now().Unix() + return nil +} + +func (b *BackupContext) executeCreateBackupV2(ctx context.Context, request *backuppb.CreateBackupRequest, backupInfo *backuppb.BackupInfo) (*backuppb.BackupInfo, error) { + b.mu.Lock() + defer b.mu.Unlock() + + wp, err := common.NewWorkerPool(ctx, b.params.BackupCfg.BackupParallelism, RPS) + if err != nil { + return backupInfo, err + } + wp.Start() + log.Info("Start collection level backup pool", zap.Int("parallelism", b.params.BackupCfg.BackupParallelism)) + + backupInfo.BackupTimestamp = uint64(time.Now().UnixNano() / int64(time.Millisecond)) + backupInfo.StateCode = backuppb.BackupTaskStateCode_BACKUP_EXECUTING + + defer b.refreshBackupCache(backupInfo) + + // 1, get collection level meta + toBackupCollections, err := b.parseBackupCollections(request) + if err != nil { + log.Error("parse backup collections from request failed", zap.Error(err)) + return backupInfo, err + } + collectionNames := make([]string, len(toBackupCollections)) + for i, coll := range toBackupCollections { + collectionNames[i] = coll.collectionName + } + log.Info("collections to 
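When a partition's total size exceeds MaxSegmentGroupSize, the loop above assigns segments to numbered groups so each group of backup files stays near the limit (group ID 0 is reserved for "ungrouped"). The following is a simplified greedy sketch of that idea, not a verbatim copy of the patch's assignment rules.

package main

import "fmt"

type seg struct {
	ID, Size, GroupID int64
}

// assignGroups packs segments into consecutive groups, starting a new group
// whenever adding the next segment would push the running total past the limit.
func assignGroups(segs []seg, maxGroupSize int64) {
	var group int64 = 1 // 0 is an illegal group id, so numbering starts at 1
	var buffered int64
	for i := range segs {
		if buffered > 0 && buffered+segs[i].Size > maxGroupSize {
			group++ // current group is full, start the next one
			buffered = 0
		}
		segs[i].GroupID = group
		buffered += segs[i].Size
	}
}

func main() {
	s := []seg{{1, 600, 0}, {2, 500, 0}, {3, 100, 0}, {4, 900, 0}}
	assignGroups(s, 1000)
	fmt.Println(s) // [{1 600 1} {2 500 2} {3 100 2} {4 900 3}]
}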
backup", zap.Strings("collections", collectionNames)) + + for _, collection := range toBackupCollections { + collectionClone := collection + job := func(ctx context.Context) error { + err := b.backupCollection(ctx, backupInfo, collectionClone, request.GetForce()) + return err + } + wp.Submit(job) + } + wp.Done() + if err := wp.Wait(); err != nil { + return backupInfo, err + } + + var backupSize int64 = 0 + leveledBackupInfo, err := treeToLevel(backupInfo) + if err != nil { + return backupInfo, err + } + for _, coll := range leveledBackupInfo.collectionLevel.GetInfos() { + backupSize += coll.GetSize() + } + backupInfo.Size = backupSize + backupInfo.EndTime = time.Now().UnixNano() / int64(time.Millisecond) + backupInfo.StateCode = backuppb.BackupTaskStateCode_BACKUP_SUCCESS + b.refreshBackupCache(backupInfo) + + // 7, write meta data + output, _ := serialize(backupInfo) + log.Debug("backup meta", zap.String("value", string(output.BackupMetaBytes))) + log.Debug("collection meta", zap.String("value", string(output.CollectionMetaBytes))) + log.Debug("partition meta", zap.String("value", string(output.PartitionMetaBytes))) + log.Debug("segment meta", zap.String("value", string(output.SegmentMetaBytes))) + + b.getStorageClient().Write(ctx, b.backupBucketName, BackupMetaPath(b.backupRootPath, backupInfo.GetName()), output.BackupMetaBytes) + b.getStorageClient().Write(ctx, b.backupBucketName, CollectionMetaPath(b.backupRootPath, backupInfo.GetName()), output.CollectionMetaBytes) + b.getStorageClient().Write(ctx, b.backupBucketName, PartitionMetaPath(b.backupRootPath, backupInfo.GetName()), output.PartitionMetaBytes) + b.getStorageClient().Write(ctx, b.backupBucketName, SegmentMetaPath(b.backupRootPath, backupInfo.GetName()), output.SegmentMetaBytes) + b.getStorageClient().Write(ctx, b.backupBucketName, FullMetaPath(b.backupRootPath, backupInfo.GetName()), output.FullMetaBytes) + + log.Info("finish executeCreateBackup", + zap.String("requestId", request.GetRequestId()), + zap.String("backupName", request.GetBackupName()), + zap.Strings("collections", request.GetCollectionNames()), + zap.Bool("async", request.GetAsync()), + zap.String("backup meta", string(output.BackupMetaBytes))) + return backupInfo, nil +} + +func (b *BackupContext) executeCreateBackup(ctx context.Context, request *backuppb.CreateBackupRequest, backupInfo *backuppb.BackupInfo) (*backuppb.BackupInfo, error) { b.mu.Lock() defer b.mu.Unlock() @@ -198,14 +622,17 @@ func (b BackupContext) executeCreateBackup(ctx context.Context, request *backupp log.Error("parse backup collections from request failed", zap.Error(err)) return backupInfo, err } - log.Info("collections to backup", zap.Any("collections", toBackupCollections)) + collectionNames := make([]string, len(toBackupCollections)) + for i, coll := range toBackupCollections { + collectionNames[i] = coll.collectionName + } + log.Info("collections to backup", zap.Strings("collections", collectionNames)) collectionBackupInfos := make([]*backuppb.CollectionBackupInfo, 0) partitionLevelBackupInfos := make([]*backuppb.PartitionBackupInfo, 0) for _, collection := range toBackupCollections { // list collection result is not complete - b.milvusClient.UsingDatabase(b.ctx, collection.db) - completeCollection, err := b.milvusClient.DescribeCollection(b.ctx, collection.collectionName) + completeCollection, err := b.getMilvusClient().DescribeCollection(b.ctx, collection.db, collection.collectionName) if err != nil { log.Error("fail in DescribeCollection", zap.Error(err)) return backupInfo, err @@ 
-241,14 +668,14 @@ func (b BackupContext) executeCreateBackup(ctx context.Context, request *backupp //if field.DataType != entity.FieldTypeBinaryVector && field.DataType != entity.FieldTypeFloatVector { // continue //} - fieldIndex, err := b.milvusClient.DescribeIndex(b.ctx, completeCollection.Name, field.Name) + fieldIndex, err := b.getMilvusClient().DescribeIndex(b.ctx, collection.db, completeCollection.Name, field.Name) if err != nil { - if strings.HasPrefix(err.Error(), "index doesn't exist") { + if strings.Contains(err.Error(), "index not found") || + strings.HasPrefix(err.Error(), "index doesn't exist") { // todo - log.Warn("field has no index", + log.Info("field has no index", zap.String("collection_name", completeCollection.Name), - zap.String("field_name", field.Name), - zap.Error(err)) + zap.String("field_name", field.Name)) continue } else { log.Error("fail in DescribeIndex", zap.Error(err)) @@ -298,16 +725,15 @@ func (b BackupContext) executeCreateBackup(ctx context.Context, request *backupp segmentLevelBackupInfos := make([]*backuppb.SegmentBackupInfo, 0) // backup collection for _, collection := range collectionBackupInfos { - b.milvusClient.UsingDatabase(b.ctx, collection.GetDbName()) partitionBackupInfos := make([]*backuppb.PartitionBackupInfo, 0) - partitions, err := b.milvusClient.ShowPartitions(b.ctx, collection.GetCollectionName()) + partitions, err := b.getMilvusClient().ShowPartitions(b.ctx, collection.GetDbName(), collection.GetCollectionName()) if err != nil { log.Error("fail to ShowPartitions", zap.Error(err)) return backupInfo, err } // use GetLoadingProgress currently, GetLoadState is a new interface @20230104 milvus pr#21515 - collectionLoadProgress, err := b.milvusClient.GetLoadingProgress(ctx, collection.GetCollectionName(), []string{}) + collectionLoadProgress, err := b.getMilvusClient().GetLoadingProgress(ctx, collection.GetDbName(), collection.GetCollectionName(), []string{}) if err != nil { log.Error("fail to GetLoadingProgress of collection", zap.Error(err)) return backupInfo, err @@ -328,7 +754,7 @@ func (b BackupContext) executeCreateBackup(ctx context.Context, request *backupp } else { collectionLoadState = LoadState_Loading for _, partition := range partitions { - loadProgress, err := b.milvusClient.GetLoadingProgress(ctx, collection.GetCollectionName(), []string{partition.Name}) + loadProgress, err := b.getMilvusClient().GetLoadingProgress(ctx, collection.GetDbName(), collection.GetCollectionName(), []string{partition.Name}) if err != nil { log.Error("fail to GetLoadingProgress of partition", zap.Error(err)) return backupInfo, err @@ -343,64 +769,76 @@ func (b BackupContext) executeCreateBackup(ctx context.Context, request *backupp } } - // Flush - segmentEntitiesBeforeFlush, err := b.milvusClient.GetPersistentSegmentInfo(ctx, collection.GetCollectionName()) - if err != nil { - return backupInfo, err - } - log.Info("GetPersistentSegmentInfo before flush from milvus", - zap.String("collectionName", collection.GetCollectionName()), - zap.Int("segmentNumBeforeFlush", len(segmentEntitiesBeforeFlush))) - - newSealedSegmentIDs, flushedSegmentIDs, timeOfSeal, err := b.milvusClient.FlushV2(ctx, collection.GetCollectionName(), false) - if err != nil { - log.Error(fmt.Sprintf("fail to flush the collection: %s", collection.GetCollectionName())) - return backupInfo, err - } - log.Info("flush segments", - zap.String("collectionName", collection.GetCollectionName()), - zap.Int64s("newSealedSegmentIDs", newSealedSegmentIDs), - zap.Int64s("flushedSegmentIDs", 
flushedSegmentIDs), - zap.Int64("timeOfSeal", timeOfSeal)) - collection.BackupTimestamp = utils.ComposeTS(timeOfSeal, 0) - collection.BackupPhysicalTimestamp = uint64(timeOfSeal) - - flushSegmentIDs := append(newSealedSegmentIDs, flushedSegmentIDs...) - segmentEntitiesAfterFlush, err := b.milvusClient.GetPersistentSegmentInfo(ctx, collection.GetCollectionName()) - if err != nil { - return backupInfo, err - } - log.Info("GetPersistentSegmentInfo after flush from milvus", - zap.String("collectionName", collection.GetCollectionName()), - zap.Int("segmentNumBeforeFlush", len(segmentEntitiesBeforeFlush)), - zap.Int("segmentNumAfterFlush", len(segmentEntitiesAfterFlush))) - // fill segments filledSegments := make([]*entity.Segment, 0) - segmentDict := utils.ArrayToMap(flushSegmentIDs) - for _, seg := range segmentEntitiesAfterFlush { - sid := seg.ID - if _, ok := segmentDict[sid]; ok { - delete(segmentDict, sid) - filledSegments = append(filledSegments, seg) - } else { - log.Warn("this may be new segments after flush, skip it", zap.Int64("id", sid)) + if !request.GetForce() { + // Flush + segmentEntitiesBeforeFlush, err := b.getMilvusClient().GetPersistentSegmentInfo(ctx, collection.GetDbName(), collection.GetCollectionName()) + if err != nil { + return backupInfo, err } - } - for _, seg := range segmentEntitiesBeforeFlush { - sid := seg.ID - if _, ok := segmentDict[sid]; ok { - delete(segmentDict, sid) + log.Info("GetPersistentSegmentInfo before flush from milvus", + zap.String("collectionName", collection.GetCollectionName()), + zap.Int("segmentNumBeforeFlush", len(segmentEntitiesBeforeFlush))) + newSealedSegmentIDs, flushedSegmentIDs, timeOfSeal, err := b.getMilvusClient().FlushV2(ctx, collection.GetDbName(), collection.GetCollectionName(), false) + if err != nil { + log.Error(fmt.Sprintf("fail to flush the collection: %s.%s", collection.GetDbName(), collection.GetCollectionName())) + return backupInfo, err + } + log.Info("flush segments", + zap.String("collectionName", collection.GetCollectionName()), + zap.Int64s("newSealedSegmentIDs", newSealedSegmentIDs), + zap.Int64s("flushedSegmentIDs", flushedSegmentIDs), + zap.Int64("timeOfSeal", timeOfSeal)) + collection.BackupTimestamp = utils.ComposeTS(timeOfSeal, 0) + collection.BackupPhysicalTimestamp = uint64(timeOfSeal) + + flushSegmentIDs := append(newSealedSegmentIDs, flushedSegmentIDs...) 
+ segmentEntitiesAfterFlush, err := b.getMilvusClient().GetPersistentSegmentInfo(ctx, collection.GetDbName(), collection.GetCollectionName()) + if err != nil { + return backupInfo, err + } + log.Info("GetPersistentSegmentInfo after flush from milvus", + zap.String("collectionName", collection.GetCollectionName()), + zap.Int("segmentNumBeforeFlush", len(segmentEntitiesBeforeFlush)), + zap.Int("segmentNumAfterFlush", len(segmentEntitiesAfterFlush))) + segmentDict := utils.ArrayToMap(flushSegmentIDs) + for _, seg := range segmentEntitiesAfterFlush { + sid := seg.ID + if _, ok := segmentDict[sid]; ok { + delete(segmentDict, sid) + filledSegments = append(filledSegments, seg) + } else { + log.Debug("this may be new segments after flush, skip it", zap.Int64("id", sid)) + } + } + for _, seg := range segmentEntitiesBeforeFlush { + sid := seg.ID + if _, ok := segmentDict[sid]; ok { + delete(segmentDict, sid) + filledSegments = append(filledSegments, seg) + } else { + log.Debug("this may be old segments before flush, skip it", zap.Int64("id", sid)) + } + } + if len(segmentDict) > 0 { + // very rare situation, segments return in flush doesn't exist in either segmentEntitiesBeforeFlush and segmentEntitiesAfterFlush + errorMsg := "Segment return in Flush not exist in GetPersistentSegmentInfo. segment ids: " + fmt.Sprint(utils.MapKeyArray(segmentDict)) + log.Warn(errorMsg) + } + } else { + // Flush + segmentEntitiesBeforeFlush, err := b.getMilvusClient().GetPersistentSegmentInfo(ctx, collection.GetDbName(), collection.GetCollectionName()) + if err != nil { + return backupInfo, err + } + log.Info("GetPersistentSegmentInfo from milvus", + zap.String("collectionName", collection.GetCollectionName()), + zap.Int("segmentNum", len(segmentEntitiesBeforeFlush))) + for _, seg := range segmentEntitiesBeforeFlush { filledSegments = append(filledSegments, seg) - } else { - log.Warn("this may be old segments before flush, skip it", zap.Int64("id", sid)) } } - if len(segmentDict) > 0 { - // very rare situation, segments return in flush doesn't exist in either segmentEntitiesBeforeFlush and segmentEntitiesAfterFlush - errorMsg := "Segment return in Flush not exist in GetPersistentSegmentInfo. 
segment ids: " + fmt.Sprint(utils.MapKeyArray(segmentDict)) - log.Warn(errorMsg) - } if err != nil { collection.StateCode = backuppb.BackupTaskStateCode_BACKUP_FAIL @@ -510,12 +948,12 @@ func (b BackupContext) executeCreateBackup(ctx context.Context, request *backupp backupSize += coll.GetSize() } backupInfo.Size = backupSize + backupInfo.EndTime = time.Now().UnixNano() / int64(time.Millisecond) + backupInfo.StateCode = backuppb.BackupTaskStateCode_BACKUP_SUCCESS backupInfo, err = b.refreshBackupMeta(id, backupInfo, leveledBackupInfo) if err != nil { return backupInfo, err } - backupInfo.EndTime = time.Now().UnixNano() / int64(time.Millisecond) - backupInfo.StateCode = backuppb.BackupTaskStateCode_BACKUP_SUCCESS // 7, write meta data output, _ := serialize(backupInfo) log.Debug("backup meta", zap.String("value", string(output.BackupMetaBytes))) @@ -523,10 +961,11 @@ func (b BackupContext) executeCreateBackup(ctx context.Context, request *backupp log.Debug("partition meta", zap.String("value", string(output.PartitionMetaBytes))) log.Debug("segment meta", zap.String("value", string(output.SegmentMetaBytes))) - b.storageClient.Write(ctx, b.backupBucketName, BackupMetaPath(b.backupRootPath, backupInfo.GetName()), output.BackupMetaBytes) - b.storageClient.Write(ctx, b.backupBucketName, CollectionMetaPath(b.backupRootPath, backupInfo.GetName()), output.CollectionMetaBytes) - b.storageClient.Write(ctx, b.backupBucketName, PartitionMetaPath(b.backupRootPath, backupInfo.GetName()), output.PartitionMetaBytes) - b.storageClient.Write(ctx, b.backupBucketName, SegmentMetaPath(b.backupRootPath, backupInfo.GetName()), output.SegmentMetaBytes) + b.getStorageClient().Write(ctx, b.backupBucketName, BackupMetaPath(b.backupRootPath, backupInfo.GetName()), output.BackupMetaBytes) + b.getStorageClient().Write(ctx, b.backupBucketName, CollectionMetaPath(b.backupRootPath, backupInfo.GetName()), output.CollectionMetaBytes) + b.getStorageClient().Write(ctx, b.backupBucketName, PartitionMetaPath(b.backupRootPath, backupInfo.GetName()), output.PartitionMetaBytes) + b.getStorageClient().Write(ctx, b.backupBucketName, SegmentMetaPath(b.backupRootPath, backupInfo.GetName()), output.SegmentMetaBytes) + b.getStorageClient().Write(ctx, b.backupBucketName, FullMetaPath(b.backupRootPath, backupInfo.GetName()), output.FullMetaBytes) log.Info("finish executeCreateBackup", zap.String("requestId", request.GetRequestId()), @@ -537,12 +976,24 @@ func (b BackupContext) executeCreateBackup(ctx context.Context, request *backupp return backupInfo, nil } -func (b BackupContext) copySegments(ctx context.Context, segments []*backuppb.SegmentBackupInfo, dstPath string) error { - wp, err := common.NewWorkerPool(ctx, WORKER_NUM, RPS) +func (b *BackupContext) copySegments(ctx context.Context, segments []*backuppb.SegmentBackupInfo, dstPath string) error { + wp, err := common.NewWorkerPool(ctx, b.params.BackupCfg.BackupCopyDataParallelism, RPS) if err != nil { return err } wp.Start() + + // generate target path + // milvus_rootpath/insert_log/collection_id/partition_id/segment_id/ => + // backup_rootpath/backup_name/binlog/insert_log/collection_id/partition_id/group_id/segment_id + backupPathFunc := func(binlogPath, rootPath, backupBinlogPath string) string { + if rootPath == "" { + return dstPath + SEPERATOR + binlogPath + } else { + return strings.Replace(binlogPath, rootPath, dstPath, 1) + } + } + for _, segment := range segments { start := time.Now().Unix() log.Debug("copy segment", @@ -553,14 +1004,7 @@ func (b BackupContext) 
copySegments(ctx context.Context, segments []*backuppb.Se // insert log for _, binlogs := range segment.GetBinlogs() { for _, binlog := range binlogs.GetBinlogs() { - // generate target path - // milvus_rootpath/insert_log/collection_id/partition_id/segment_id/ => - // backup_rootpath/backup_name/insert_log/collection_id/partition_id/group_id/segment_id - targetPath := strings.Replace(binlog.GetLogPath(), - b.milvusRootPath, - dstPath, - //BackupBinlogDirPath(b.backupRootPath, backupInfo.GetName()), - 1) + targetPath := backupPathFunc(binlog.GetLogPath(), b.milvusRootPath, dstPath) if segment.GetGroupId() != 0 { targetPath = strings.Replace(targetPath, strconv.FormatInt(segment.GetPartitionId(), 10), @@ -573,7 +1017,7 @@ func (b BackupContext) copySegments(ctx context.Context, segments []*backuppb.Se binlog := binlog job := func(ctx context.Context) error { - exist, err := b.storageClient.Exist(ctx, b.milvusBucketName, binlog.GetLogPath()) + exist, err := b.getStorageClient().Exist(ctx, b.milvusBucketName, binlog.GetLogPath()) if err != nil { log.Info("Fail to check file exist", zap.Error(err), @@ -587,7 +1031,7 @@ func (b BackupContext) copySegments(ctx context.Context, segments []*backuppb.Se return err } - err = b.storageClient.Copy(ctx, b.milvusBucketName, b.backupBucketName, binlog.GetLogPath(), targetPath) + err = b.getStorageClient().Copy(ctx, b.milvusBucketName, b.backupBucketName, binlog.GetLogPath(), targetPath) if err != nil { log.Info("Fail to copy file", zap.Error(err), @@ -608,14 +1052,7 @@ func (b BackupContext) copySegments(ctx context.Context, segments []*backuppb.Se // delta log for _, binlogs := range segment.GetDeltalogs() { for _, binlog := range binlogs.GetBinlogs() { - // generate target path - // milvus_rootpath/delta_log/collection_id/partition_id/segment_id/ => - // backup_rootpath/backup_name/delta_log/collection_id/partition_id/group_id/segment_id - targetPath := strings.Replace(binlog.GetLogPath(), - b.milvusRootPath, - dstPath, - //BackupBinlogDirPath(b.backupRootPath, backupInfo.GetName()), - 1) + targetPath := backupPathFunc(binlog.GetLogPath(), b.milvusRootPath, dstPath) if segment.GetGroupId() != 0 { targetPath = strings.Replace(targetPath, strconv.FormatInt(segment.GetPartitionId(), 10), @@ -628,7 +1065,7 @@ func (b BackupContext) copySegments(ctx context.Context, segments []*backuppb.Se binlog := binlog job := func(ctx context.Context) error { - exist, err := b.storageClient.Exist(ctx, b.milvusBucketName, binlog.GetLogPath()) + exist, err := b.getStorageClient().Exist(ctx, b.milvusBucketName, binlog.GetLogPath()) if err != nil { log.Info("Fail to check file exist", zap.Error(err), @@ -641,7 +1078,7 @@ func (b BackupContext) copySegments(ctx context.Context, segments []*backuppb.Se zap.String("file", binlog.GetLogPath())) return err } - err = b.storageClient.Copy(ctx, b.milvusBucketName, b.backupBucketName, binlog.GetLogPath(), targetPath) + err = b.getStorageClient().Copy(ctx, b.milvusBucketName, b.backupBucketName, binlog.GetLogPath(), targetPath) if err != nil { log.Info("Fail to copy file", zap.Error(err), @@ -672,22 +1109,34 @@ func (b BackupContext) copySegments(ctx context.Context, segments []*backuppb.Se return nil } -func (b BackupContext) readSegmentInfo(ctx context.Context, collecitonID int64, partitionID int64, segmentID int64, numOfRows int64) (*backuppb.SegmentBackupInfo, error) { +func (b *BackupContext) readSegmentInfo(ctx context.Context, collectionID int64, partitionID int64, segmentID int64, numOfRows int64) 
(*backuppb.SegmentBackupInfo, error) { segmentBackupInfo := backuppb.SegmentBackupInfo{ SegmentId: segmentID, - CollectionId: collecitonID, + CollectionId: collectionID, PartitionId: partitionID, NumOfRows: numOfRows, } var size int64 = 0 + var rootPath string + + if b.params.MinioCfg.RootPath != "" { + log.Debug("params.MinioCfg.RootPath", zap.String("params.MinioCfg.RootPath", b.params.MinioCfg.RootPath)) + rootPath = fmt.Sprintf("%s/", b.params.MinioCfg.RootPath) + } else { + rootPath = "" + } - insertPath := fmt.Sprintf("%s/%s/%v/%v/%v/", b.params.MinioCfg.RootPath, "insert_log", collecitonID, partitionID, segmentID) - log.Debug("insertPath", zap.String("insertPath", insertPath)) - fieldsLogDir, _, _ := b.storageClient.ListWithPrefix(ctx, b.milvusBucketName, insertPath, false) - log.Debug("fieldsLogDir", zap.Any("fieldsLogDir", fieldsLogDir)) + insertPath := fmt.Sprintf("%s%s/%v/%v/%v/", rootPath, "insert_log", collectionID, partitionID, segmentID) + log.Debug("insertPath", zap.String("bucket", b.milvusBucketName), zap.String("insertPath", insertPath)) + fieldsLogDir, _, err := b.getStorageClient().ListWithPrefix(ctx, b.milvusBucketName, insertPath, false) + if err != nil { + log.Error("Fail to list segment path", zap.String("insertPath", insertPath), zap.Error(err)) + return &segmentBackupInfo, err + } + log.Debug("fieldsLogDir", zap.String("bucket", b.milvusBucketName), zap.Any("fieldsLogDir", fieldsLogDir)) insertLogs := make([]*backuppb.FieldBinlog, 0) for _, fieldLogDir := range fieldsLogDir { - binlogPaths, sizes, _ := b.storageClient.ListWithPrefix(ctx, b.milvusBucketName, fieldLogDir, false) + binlogPaths, sizes, _ := b.getStorageClient().ListWithPrefix(ctx, b.milvusBucketName, fieldLogDir, false) fieldIdStr := strings.Replace(strings.Replace(fieldLogDir, insertPath, "", 1), SEPERATOR, "", -1) fieldId, _ := strconv.ParseInt(fieldIdStr, 10, 64) binlogs := make([]*backuppb.Binlog, 0) @@ -704,11 +1153,11 @@ func (b BackupContext) readSegmentInfo(ctx context.Context, collecitonID int64, }) } - deltaLogPath := fmt.Sprintf("%s/%s/%v/%v/%v/", b.params.MinioCfg.RootPath, "delta_log", collecitonID, partitionID, segmentID) - deltaFieldsLogDir, _, _ := b.storageClient.ListWithPrefix(ctx, b.milvusBucketName, deltaLogPath, false) + deltaLogPath := fmt.Sprintf("%s%s/%v/%v/%v/", rootPath, "delta_log", collectionID, partitionID, segmentID) + deltaFieldsLogDir, _, _ := b.getStorageClient().ListWithPrefix(ctx, b.milvusBucketName, deltaLogPath, false) deltaLogs := make([]*backuppb.FieldBinlog, 0) for _, deltaFieldLogDir := range deltaFieldsLogDir { - binlogPaths, sizes, _ := b.storageClient.ListWithPrefix(ctx, b.milvusBucketName, deltaFieldLogDir, false) + binlogPaths, sizes, _ := b.getStorageClient().ListWithPrefix(ctx, b.milvusBucketName, deltaFieldLogDir, false) fieldIdStr := strings.Replace(strings.Replace(deltaFieldLogDir, deltaLogPath, "", 1), SEPERATOR, "", -1) fieldId, _ := strconv.ParseInt(fieldIdStr, 10, 64) binlogs := make([]*backuppb.Binlog, 0) @@ -730,7 +1179,7 @@ func (b BackupContext) readSegmentInfo(ctx context.Context, collecitonID int64, }) } - //statsLogPath := fmt.Sprintf("%s/%s/%v/%v/%v/", b.params.MinioCfg.RootPath, "stats_log", collecitonID, partitionID, segmentID) + //statsLogPath := fmt.Sprintf("%s/%s/%v/%v/%v/", b.params.MinioCfg.RootPath, "stats_log", collectionID, partitionID, segmentID) //statsFieldsLogDir, _, _ := b.storageClient.ListWithPrefix(ctx, b.milvusBucketName, statsLogPath, false) //statsLogs := make([]*backuppb.FieldBinlog, 0) //for _, statsFieldLogDir := 
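readSegmentInfo now tolerates an empty MinioCfg.RootPath by building the listing prefix with an optional "<root>/" prefix rather than always injecting a slash. A small sketch of that prefix construction; the IDs are placeholders.

package main

import "fmt"

// segmentPrefix builds <prefix><logType>/<collectionID>/<partitionID>/<segmentID>/,
// where the prefix is empty when no storage root path is configured.
func segmentPrefix(rootPath, logType string, collectionID, partitionID, segmentID int64) string {
	prefix := ""
	if rootPath != "" {
		prefix = rootPath + "/"
	}
	return fmt.Sprintf("%s%s/%v/%v/%v/", prefix, logType, collectionID, partitionID, segmentID)
}

func main() {
	fmt.Println(segmentPrefix("files", "insert_log", 444, 555, 666)) // files/insert_log/444/555/666/
	fmt.Println(segmentPrefix("", "delta_log", 444, 555, 666))       // delta_log/444/555/666/
}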
range statsFieldsLogDir { diff --git a/core/backup_impl_restore_backup.go b/core/backup_impl_restore_backup.go index 585717b7..40e809db 100644 --- a/core/backup_impl_restore_backup.go +++ b/core/backup_impl_restore_backup.go @@ -7,29 +7,34 @@ import ( "time" "github.com/cockroachdb/errors" + jsoniter "github.com/json-iterator/go" gomilvus "github.com/milvus-io/milvus-sdk-go/v2/client" "github.com/milvus-io/milvus-sdk-go/v2/entity" "go.uber.org/zap" "github.com/zilliztech/milvus-backup/core/proto/backuppb" "github.com/zilliztech/milvus-backup/core/utils" + "github.com/zilliztech/milvus-backup/internal/common" "github.com/zilliztech/milvus-backup/internal/log" "github.com/zilliztech/milvus-backup/internal/util/retry" ) -func (b BackupContext) RestoreBackup(ctx context.Context, request *backuppb.RestoreBackupRequest) *backuppb.RestoreBackupResponse { +func (b *BackupContext) RestoreBackup(ctx context.Context, request *backuppb.RestoreBackupRequest) *backuppb.RestoreBackupResponse { if request.GetRequestId() == "" { request.RequestId = utils.UUID() } log.Info("receive RestoreBackupRequest", zap.String("requestId", request.GetRequestId()), zap.String("backupName", request.GetBackupName()), + zap.Bool("onlyMeta", request.GetMetaOnly()), + zap.Bool("restoreIndex", request.GetRestoreIndex()), zap.Strings("collections", request.GetCollectionNames()), zap.String("CollectionSuffix", request.GetCollectionSuffix()), zap.Any("CollectionRenames", request.GetCollectionRenames()), zap.Bool("async", request.GetAsync()), zap.String("bucketName", request.GetBucketName()), - zap.String("path", request.GetPath())) + zap.String("path", request.GetPath()), + zap.String("databaseCollections", utils.GetRestoreDBCollections(request))) resp := &backuppb.RestoreBackupResponse{ RequestId: request.GetRequestId(), @@ -99,8 +104,49 @@ func (b BackupContext) RestoreBackup(ctx context.Context, request *backuppb.Rest // 2, initial restoreCollectionTasks toRestoreCollectionBackups := make([]*backuppb.CollectionBackupInfo, 0) - if len(request.GetCollectionNames()) == 0 { - toRestoreCollectionBackups = backup.GetCollectionBackups() + + dbCollectionsStr := utils.GetRestoreDBCollections(request) + if dbCollectionsStr != "" { + var dbCollections DbCollections + err := jsoniter.UnmarshalFromString(dbCollectionsStr, &dbCollections) + if err != nil { + log.Error("fail in unmarshal dbCollections in RestoreBackupRequest", zap.String("dbCollections", dbCollectionsStr), zap.Error(err)) + errorMsg := fmt.Sprintf("fail in unmarshal dbCollections in RestoreBackupRequest, dbCollections: %s, err: %s", request.GetDbCollections(), err) + log.Error(errorMsg) + resp.Code = backuppb.ResponseCode_Fail + resp.Msg = errorMsg + return resp + } + for db, collections := range dbCollections { + if len(collections) == 0 { + for _, collectionBackup := range backup.GetCollectionBackups() { + if collectionBackup.GetDbName() == "" { + collectionBackup.DbName = "default" + } + if collectionBackup.GetDbName() == db { + toRestoreCollectionBackups = append(toRestoreCollectionBackups, collectionBackup) + } + } + } else { + for _, coll := range collections { + for _, collectionBackup := range backup.GetCollectionBackups() { + if collectionBackup.GetDbName() == "" { + collectionBackup.DbName = "default" + } + if collectionBackup.GetDbName() == db && collectionBackup.CollectionName == coll { + toRestoreCollectionBackups = append(toRestoreCollectionBackups, collectionBackup) + } + } + } + } + } + } else if len(request.GetCollectionNames()) == 0 { + for _, 
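On the restore side, db_collections is honored the same way: backups recorded without a database name are backfilled with "default", and a database mapped to an empty list selects all of its collections. A trimmed-down sketch of that selection logic follows; types and values are illustrative and map iteration order is not deterministic.

package main

import "fmt"

type collBackup struct{ DbName, CollectionName string }

// selectBackups picks the collection backups matching a db_collections request.
func selectBackups(all []collBackup, dbColls map[string][]string) []collBackup {
	for i := range all {
		if all[i].DbName == "" {
			all[i].DbName = "default" // backward compatibility with pre-database backups
		}
	}
	out := make([]collBackup, 0)
	for db, colls := range dbColls {
		for _, cb := range all {
			if cb.DbName != db {
				continue
			}
			if len(colls) == 0 {
				out = append(out, cb) // empty list: restore the whole database
				continue
			}
			for _, c := range colls {
				if cb.CollectionName == c {
					out = append(out, cb)
				}
			}
		}
	}
	return out
}

func main() {
	backups := []collBackup{{"", "c1"}, {"db1", "c2"}, {"db1", "c3"}}
	fmt.Println(selectBackups(backups, map[string][]string{"default": {}, "db1": {"c3"}}))
	// selects {default c1} and {db1 c3}; output order depends on map iteration
}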
collectionBackup := range backup.GetCollectionBackups() { + if collectionBackup.GetDbName() == "" { + collectionBackup.DbName = "default" + } + toRestoreCollectionBackups = append(toRestoreCollectionBackups, collectionBackup) + } } else { collectionNameDict := make(map[string]bool) for _, collectionName := range request.GetCollectionNames() { @@ -113,12 +159,10 @@ func (b BackupContext) RestoreBackup(ctx context.Context, request *backuppb.Rest collectionNameDict[fullCollectionName] = true } for _, collectionBackup := range backup.GetCollectionBackups() { - dbName := "default" - if collectionBackup.GetDbName() != "" { - dbName = collectionBackup.GetDbName() + if collectionBackup.GetDbName() == "" { + collectionBackup.DbName = "default" } - fullCollectionName := dbName + "." + collectionBackup.GetCollectionName() - collectionBackup.GetCollectionName() + fullCollectionName := collectionBackup.GetDbName() + "." + collectionBackup.GetCollectionName() if collectionNameDict[fullCollectionName] { toRestoreCollectionBackups = append(toRestoreCollectionBackups, collectionBackup) } @@ -126,29 +170,85 @@ func (b BackupContext) RestoreBackup(ctx context.Context, request *backuppb.Rest } log.Info("Collections to restore", zap.Int("collection_num", len(toRestoreCollectionBackups))) + // add default db in collection_renames if not set + collectionRenames := make(map[string]string) + dbRenames := make(map[string]string) + for oldname, newName := range request.GetCollectionRenames() { + if strings.HasSuffix(oldname, ".*") && strings.HasSuffix(newName, ".*") { + dbRenames[strings.Split(oldname, ".*")[0]] = strings.Split(newName, ".*")[0] + } + + var fullCollectionName string + if strings.Contains(oldname, ".") { + fullCollectionName = oldname + } else { + fullCollectionName = "default." + oldname + } + var fullCollectionNewName string + if strings.Contains(newName, ".") { + fullCollectionNewName = newName + } else { + fullCollectionNewName = "default." + newName + } + collectionRenames[fullCollectionName] = fullCollectionNewName + } + restoreCollectionTasks := make([]*backuppb.RestoreCollectionTask, 0) for _, restoreCollection := range toRestoreCollectionBackups { - backupCollectionName := restoreCollection.GetSchema().GetName() - var targetCollectionName string + backupDBCollectionName := restoreCollection.DbName + "." + restoreCollection.GetSchema().GetName() + targetDBName := restoreCollection.DbName + targetCollectionName := restoreCollection.GetSchema().GetName() + if value, ok := dbRenames[restoreCollection.DbName]; ok { + targetDBName = value + } // rename collection, rename map has higher priority then suffix - if len(request.GetCollectionRenames()) > 0 && request.GetCollectionRenames()[backupCollectionName] != "" { - targetCollectionName = request.GetCollectionRenames()[backupCollectionName] + if len(request.GetCollectionRenames()) > 0 && collectionRenames[backupDBCollectionName] != "" { + targetDBName = strings.Split(collectionRenames[backupDBCollectionName], ".")[0] + targetCollectionName = strings.Split(collectionRenames[backupDBCollectionName], ".")[1] } else if request.GetCollectionSuffix() != "" { - targetCollectionName = backupCollectionName + request.GetCollectionSuffix() - } else { - targetCollectionName = backupCollectionName + targetCollectionName = targetCollectionName + request.GetCollectionSuffix() } + targetDBCollectionName := targetDBName + "." 
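Collection renames are normalized before use: an "olddb.*" to "newdb.*" pair renames a whole database, and bare collection names are qualified with "default." so every entry becomes "db.collection". Below is a simplified sketch of that normalization; the real code also keeps the "db.*" entry in the collection map, whereas this version separates the two cases for clarity.

package main

import (
	"fmt"
	"strings"
)

// normalizeRenames splits the user-supplied rename map into fully qualified
// collection renames and database-level renames.
func normalizeRenames(renames map[string]string) (collRenames, dbRenames map[string]string) {
	collRenames = make(map[string]string)
	dbRenames = make(map[string]string)
	qualify := func(name string) string {
		if strings.Contains(name, ".") {
			return name
		}
		return "default." + name
	}
	for oldName, newName := range renames {
		if strings.HasSuffix(oldName, ".*") && strings.HasSuffix(newName, ".*") {
			dbRenames[strings.TrimSuffix(oldName, ".*")] = strings.TrimSuffix(newName, ".*")
			continue
		}
		collRenames[qualify(oldName)] = qualify(newName)
	}
	return collRenames, dbRenames
}

func main() {
	c, d := normalizeRenames(map[string]string{"coll1": "coll1_bak", "db1.*": "db2.*"})
	fmt.Println(c) // map[default.coll1:default.coll1_bak]
	fmt.Println(d) // map[db1:db2]
}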
+ targetCollectionName - exist, err := b.milvusClient.HasCollection(ctx, targetCollectionName) + // check if the database exist, if not, create it first + dbs, err := b.getMilvusClient().ListDatabases(ctx) if err != nil { - errorMsg := fmt.Sprintf("fail to check whether the collection is exist, collection_name: %s, err: %s", targetCollectionName, err) + errorMsg := fmt.Sprintf("fail to list databases, err: %s", err) + log.Error(errorMsg) + resp.Code = backuppb.ResponseCode_Fail + resp.Msg = errorMsg + return resp + } + var hasDatabase = false + for _, db := range dbs { + if db.Name == targetDBName { + hasDatabase = true + break + } + } + if !hasDatabase { + err := b.getMilvusClient().CreateDatabase(ctx, targetDBName) + if err != nil { + errorMsg := fmt.Sprintf("fail to create database %s, err: %s", targetDBName, err) + log.Error(errorMsg) + resp.Code = backuppb.ResponseCode_Fail + resp.Msg = errorMsg + return resp + } + log.Info("create database", zap.String("database", targetDBName)) + } + + // check if the collection exist, if exist, will not restore + exist, err := b.getMilvusClient().HasCollection(ctx, targetDBName, targetCollectionName) + if err != nil { + errorMsg := fmt.Sprintf("fail to check whether the collection is exist, collection_name: %s, err: %s", targetDBCollectionName, err) log.Error(errorMsg) resp.Code = backuppb.ResponseCode_Fail resp.Msg = errorMsg return resp } if exist { - errorMsg := fmt.Sprintf("The collection to restore already exists, backupCollectName: %s, targetCollectionName: %s", backupCollectionName, targetCollectionName) + errorMsg := fmt.Sprintf("The collection to restore already exists, backupCollectName: %s, targetCollectionName: %s", backupDBCollectionName, targetDBCollectionName) log.Error(errorMsg) resp.Code = backuppb.ResponseCode_Fail resp.Msg = errorMsg @@ -166,11 +266,14 @@ func (b BackupContext) RestoreBackup(ctx context.Context, request *backuppb.Rest StateCode: backuppb.RestoreTaskStateCode_INITIAL, StartTime: time.Now().Unix(), CollBackup: restoreCollection, + TargetDbName: targetDBName, TargetCollectionName: targetCollectionName, PartitionRestoreTasks: []*backuppb.RestorePartitionTask{}, ToRestoreSize: toRestoreSize, RestoredSize: 0, Progress: 0, + MetaOnly: request.GetMetaOnly(), + RestoreIndex: request.GetRestoreIndex(), } restoreCollectionTasks = append(restoreCollectionTasks, restoreCollectionTask) task.CollectionRestoreTasks = restoreCollectionTasks @@ -201,10 +304,17 @@ func (b BackupContext) RestoreBackup(ctx context.Context, request *backuppb.Rest } } -func (b BackupContext) executeRestoreBackupTask(ctx context.Context, backupBucketName string, backupPath string, backup *backuppb.BackupInfo, task *backuppb.RestoreBackupTask) (*backuppb.RestoreBackupTask, error) { +func (b *BackupContext) executeRestoreBackupTask(ctx context.Context, backupBucketName string, backupPath string, backup *backuppb.BackupInfo, task *backuppb.RestoreBackupTask) (*backuppb.RestoreBackupTask, error) { b.mu.Lock() defer b.mu.Unlock() + wp, err := common.NewWorkerPool(ctx, b.params.BackupCfg.RestoreParallelism, RPS) + if err != nil { + return task, err + } + wp.Start() + log.Info("Start collection level restore pool", zap.Int("parallelism", b.params.BackupCfg.RestoreParallelism)) + id := task.GetId() b.restoreTasks[id] = task task.StateCode = backuppb.RestoreTaskStateCode_EXECUTING @@ -222,22 +332,34 @@ func (b BackupContext) executeRestoreBackupTask(ctx context.Context, backupBucke // 3, execute restoreCollectionTasks for _, restoreCollectionTask := range 
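Before creating the target collection, the restore path now makes sure the target database exists, creating it when ListDatabases does not report it. A minimal, SDK-agnostic sketch of that check-then-create step is below; the interface and method names are stand-ins, not the project's client wrapper.

package main

import "fmt"

type dbClient interface {
	ListDatabases() ([]string, error)
	CreateDatabase(name string) error
}

// ensureDatabase creates the database only when it is not already listed.
func ensureDatabase(c dbClient, name string) error {
	dbs, err := c.ListDatabases()
	if err != nil {
		return err
	}
	for _, db := range dbs {
		if db == name {
			return nil // already there, nothing to do
		}
	}
	return c.CreateDatabase(name)
}

type fakeClient struct{ dbs []string }

func (f *fakeClient) ListDatabases() ([]string, error) { return f.dbs, nil }
func (f *fakeClient) CreateDatabase(name string) error {
	f.dbs = append(f.dbs, name)
	fmt.Println("created database", name)
	return nil
}

func main() {
	c := &fakeClient{dbs: []string{"default"}}
	fmt.Println(ensureDatabase(c, "db2")) // created database db2, then <nil>
	fmt.Println(ensureDatabase(c, "db2")) // <nil>, no second create
}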
restoreCollectionTasks { - endTask, err := b.executeRestoreCollectionTask(ctx, backupBucketName, backupPath, restoreCollectionTask) - if err != nil { - log.Error("executeRestoreCollectionTask failed", - zap.String("TargetCollectionName", restoreCollectionTask.GetTargetCollectionName()), - zap.Error(err)) - return task, err - } - log.Info("finish restore collection", zap.String("collection_name", restoreCollectionTask.GetTargetCollectionName())) - restoreCollectionTask.StateCode = backuppb.RestoreTaskStateCode_SUCCESS - task.RestoredSize += endTask.RestoredSize - if task.GetToRestoreSize() == 0 { - task.Progress = 100 - } else { - task.Progress = int32(100 * task.GetRestoredSize() / task.GetToRestoreSize()) + restoreCollectionTaskClone := restoreCollectionTask + job := func(ctx context.Context) error { + endTask, err := b.executeRestoreCollectionTask(ctx, backupBucketName, backupPath, restoreCollectionTaskClone, id) + if err != nil { + log.Error("executeRestoreCollectionTask failed", + zap.String("TargetDBName", restoreCollectionTaskClone.GetTargetDbName()), + zap.String("TargetCollectionName", restoreCollectionTaskClone.GetTargetCollectionName()), + zap.Error(err)) + return err + } + log.Info("finish restore collection", + zap.String("db_name", restoreCollectionTaskClone.GetTargetDbName()), + zap.String("collection_name", restoreCollectionTaskClone.GetTargetCollectionName())) + restoreCollectionTaskClone.StateCode = backuppb.RestoreTaskStateCode_SUCCESS + task.RestoredSize += endTask.RestoredSize + if task.GetToRestoreSize() == 0 { + task.Progress = 100 + } else { + task.Progress = int32(100 * task.GetRestoredSize() / task.GetToRestoreSize()) + } + updateRestoreTaskFunc(id, task) + return nil } - updateRestoreTaskFunc(id, task) + wp.Submit(job) + } + wp.Done() + if err := wp.Wait(); err != nil { + return task, err } task.StateCode = backuppb.RestoreTaskStateCode_SUCCESS @@ -245,11 +367,13 @@ func (b BackupContext) executeRestoreBackupTask(ctx context.Context, backupBucke return task, nil } -func (b BackupContext) executeRestoreCollectionTask(ctx context.Context, backupBucketName string, backupPath string, task *backuppb.RestoreCollectionTask) (*backuppb.RestoreCollectionTask, error) { +func (b *BackupContext) executeRestoreCollectionTask(ctx context.Context, backupBucketName string, backupPath string, task *backuppb.RestoreCollectionTask, parentTaskID string) (*backuppb.RestoreCollectionTask, error) { + targetDBName := task.GetTargetDbName() targetCollectionName := task.GetTargetCollectionName() task.StateCode = backuppb.RestoreTaskStateCode_EXECUTING log.Info("start restore", - zap.String("collection_name", task.GetTargetCollectionName()), + zap.String("db_name", targetDBName), + zap.String("collection_name", targetCollectionName), zap.String("backupBucketName", backupBucketName), zap.String("backupPath", backupPath)) // create collection @@ -283,24 +407,20 @@ func (b BackupContext) executeRestoreCollectionTask(ctx context.Context, backupB EnableDynamicField: task.GetCollBackup().GetSchema().GetEnableDynamicField(), } - dbName := task.GetCollBackup().GetDbName() - if dbName == "" { - dbName = "default" - } - b.milvusClient.UsingDatabase(ctx, dbName) - err := retry.Do(ctx, func() error { if hasPartitionKey { partitionNum := len(task.GetCollBackup().GetPartitionBackups()) - return b.milvusClient.CreateCollection( + return b.getMilvusClient().CreateCollection( ctx, + targetDBName, collectionSchema, task.GetCollBackup().GetShardsNum(), 
gomilvus.WithConsistencyLevel(entity.ConsistencyLevel(task.GetCollBackup().GetConsistencyLevel())), gomilvus.WithPartitionNum(int64(partitionNum))) } - return b.milvusClient.CreateCollection( + return b.getMilvusClient().CreateCollection( ctx, + targetDBName, collectionSchema, task.GetCollBackup().GetShardsNum(), gomilvus.WithConsistencyLevel(entity.ConsistencyLevel(task.GetCollBackup().GetConsistencyLevel()))) @@ -312,30 +432,125 @@ func (b BackupContext) executeRestoreCollectionTask(ctx context.Context, backupB task.ErrorMessage = errorMsg return task, err } - log.Info("create collection", zap.String("collectionName", targetCollectionName), zap.Bool("hasPartitionKey", hasPartitionKey)) + log.Info("create collection", + zap.String("database", targetDBName), + zap.String("collectionName", targetCollectionName), + zap.Bool("hasPartitionKey", hasPartitionKey)) + + if task.GetRestoreIndex() { + indexes := task.GetCollBackup().GetIndexInfos() + for _, index := range indexes { + idx := entity.NewGenericIndex(index.GetIndexName(), entity.IndexType(index.GetIndexType()), index.GetFieldName(), index.GetParams()) + err := b.getMilvusClient().CreateIndex(ctx, targetDBName, targetCollectionName, index.GetFieldName(), idx, true) + if err != nil { + log.Warn("Fail to restore index", zap.Error(err)) + return task, err + } + } + } + tempDir := "restore-temp-" + parentTaskID + SEPERATOR + isSameBucket := b.milvusBucketName == backupBucketName + // clean the temporary file + defer func() { + if !isSameBucket { + log.Info("Delete temporary file", zap.String("dir", tempDir)) + err := b.getStorageClient().RemoveWithPrefix(ctx, b.milvusBucketName, tempDir) + if err != nil { + log.Warn("Delete temporary file failed", zap.Error(err)) + } + } + }() + + jobIds := make([]int64, 0) for _, partitionBackup := range task.GetCollBackup().GetPartitionBackups() { - exist, err := b.milvusClient.HasPartition(ctx, targetCollectionName, partitionBackup.GetPartitionName()) + partitionBackup2 := partitionBackup + job := func(ctx context.Context) error { + log.Info("start restore partition", + zap.String("backupCollectionName", task.GetCollBackup().GetCollectionName()), + zap.String("targetDBName", targetDBName), + zap.String("targetCollectionName", targetCollectionName), + zap.String("partition", partitionBackup2.GetPartitionName())) + _, err := b.restorePartition(ctx, targetDBName, targetCollectionName, partitionBackup2, task, isSameBucket, backupBucketName, backupPath, tempDir) + if err != nil { + log.Error("fail to restore partition", + zap.String("backupCollectionName", task.GetCollBackup().GetCollectionName()), + zap.String("targetDBName", targetDBName), + zap.String("targetCollectionName", targetCollectionName), + zap.String("partition", partitionBackup2.GetPartitionName()), + zap.Error(err)) + return err + } + return err + } + jobId := b.getRestoreWorkerPool().SubmitWithId(job) + jobIds = append(jobIds, jobId) + } + + err = b.getRestoreWorkerPool().WaitJobs(jobIds) + return task, err +} + +func (b *BackupContext) restorePartition(ctx context.Context, targetDBName, targetCollectionName string, + partitionBackup *backuppb.PartitionBackupInfo, task *backuppb.RestoreCollectionTask, isSameBucket bool, backupBucketName string, backupPath string, tempDir string) (*backuppb.RestoreCollectionTask, error) { + exist, err := b.getMilvusClient().HasPartition(ctx, targetDBName, targetCollectionName, partitionBackup.GetPartitionName()) + if err != nil { + log.Error("fail to check has partition", zap.Error(err)) + return task, err + } + 
if !exist { + err = retry.Do(ctx, func() error { + return b.getMilvusClient().CreatePartition(ctx, targetDBName, targetCollectionName, partitionBackup.GetPartitionName()) + }, retry.Attempts(10), retry.Sleep(1*time.Second)) if err != nil { - log.Error("fail to check has partition", zap.Error(err)) + log.Error("fail to create partition", zap.Error(err)) return task, err } - if !exist { - err = retry.Do(ctx, func() error { - return b.milvusClient.CreatePartition(ctx, targetCollectionName, partitionBackup.GetPartitionName()) - }, retry.Attempts(10), retry.Sleep(1*time.Second)) - if err != nil { - log.Error("fail to create partition", zap.Error(err)) - return task, err + } + log.Info("create partition", + zap.String("collectionName", targetCollectionName), + zap.String("partitionName", partitionBackup.GetPartitionName())) + + // bulk insert + copyAndBulkInsert := func(files []string) error { + realFiles := make([]string, len(files)) + // if milvus bucket and backup bucket are not the same, should copy the data first + if !isSameBucket { + log.Info("milvus bucket and backup bucket are not the same, copy the data first", zap.Strings("files", files)) + for i, file := range files { + // empty delta file, no need to copy + if file == "" { + realFiles[i] = file + } else { + err := b.getStorageClient().Copy(ctx, backupBucketName, b.milvusBucketName, file, tempDir+file) + if err != nil { + log.Error("fail to copy backup date from backup bucket to restore target milvus bucket", zap.Error(err)) + return err + } + realFiles[i] = tempDir + file + } } + } else { + realFiles = files } - log.Info("create partition", - zap.String("collectionName", targetCollectionName), - zap.String("partitionName", partitionBackup.GetPartitionName())) - // bulk insert - groupIds := collectGroupIdsFromSegments(partitionBackup.GetSegmentBackups()) + err = b.executeBulkInsert(ctx, targetDBName, targetCollectionName, partitionBackup.GetPartitionName(), realFiles, int64(task.GetCollBackup().BackupTimestamp)) + if err != nil { + log.Error("fail to bulk insert to partition", + zap.String("backupCollectionName", task.GetCollBackup().GetCollectionName()), + zap.String("targetDBName", targetDBName), + zap.String("targetCollectionName", targetCollectionName), + zap.String("partition", partitionBackup.GetPartitionName()), + zap.Error(err)) + return err + } + return nil + } + if task.GetMetaOnly() { + task.Progress = 100 + } else { + groupIds := collectGroupIdsFromSegments(partitionBackup.GetSegmentBackups()) if len(groupIds) == 1 && groupIds[0] == 0 { // backward compatible old backup without group id files, err := b.getBackupPartitionPaths(ctx, backupBucketName, backupPath, partitionBackup) @@ -347,9 +562,9 @@ func (b BackupContext) executeRestoreCollectionTask(ctx context.Context, backupB zap.String("partition", partitionBackup.GetPartitionName())) return task, err } - err = b.executeBulkInsert(ctx, targetCollectionName, partitionBackup.GetPartitionName(), files, int64(task.GetCollBackup().BackupTimestamp)) + err = copyAndBulkInsert(files) if err != nil { - log.Error("fail to bulk insert to partition", + log.Error("fail to (copy and) bulkinsert data", zap.Error(err), zap.String("backupCollectionName", task.GetCollBackup().GetCollectionName()), zap.String("targetCollectionName", targetCollectionName), @@ -368,9 +583,9 @@ func (b BackupContext) executeRestoreCollectionTask(ctx context.Context, backupB zap.String("partition", partitionBackup.GetPartitionName())) return task, err } - err = b.executeBulkInsert(ctx, targetCollectionName, 
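copyAndBulkInsert only stages files when the backup bucket differs from the Milvus bucket: each file is copied into a per-task temp prefix first, while empty delta-log entries are passed through untouched. A sketch of that staging decision with a stand-in copy function:

package main

import "fmt"

// stagePaths returns the paths to hand to bulk insert, copying into tempDir
// only when the source and target buckets differ.
func stagePaths(files []string, sameBucket bool, tempDir string,
	copyFn func(src, dst string) error) ([]string, error) {
	if sameBucket {
		return files, nil // bulk insert can read the backup bucket directly
	}
	staged := make([]string, len(files))
	for i, f := range files {
		if f == "" { // empty delta-log entry, nothing to copy
			staged[i] = f
			continue
		}
		dst := tempDir + f
		if err := copyFn(f, dst); err != nil {
			return nil, err
		}
		staged[i] = dst
	}
	return staged, nil
}

func main() {
	copyFn := func(src, dst string) error { fmt.Printf("copy %s -> %s\n", src, dst); return nil }
	out, _ := stagePaths([]string{"backup/my_backup/binlogs/insert_log/1/2/", ""}, false, "restore-temp-task1/", copyFn)
	fmt.Println(out)
}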
partitionBackup.GetPartitionName(), files, int64(task.GetCollBackup().BackupTimestamp)) + err = copyAndBulkInsert(files) if err != nil { - log.Error("fail to bulk insert to partition", + log.Error("fail to (copy and) bulkinsert data", zap.Error(err), zap.String("backupCollectionName", task.GetCollBackup().GetCollectionName()), zap.String("targetCollectionName", targetCollectionName), @@ -386,8 +601,7 @@ func (b BackupContext) executeRestoreCollectionTask(ctx context.Context, backupB task.Progress = int32(100 * task.RestoredSize / task.ToRestoreSize) } } - - return task, err + return task, nil } func collectGroupIdsFromSegments(segments []*backuppb.SegmentBackupInfo) []int64 { @@ -402,19 +616,27 @@ func collectGroupIdsFromSegments(segments []*backuppb.SegmentBackupInfo) []int64 return res } -func (b BackupContext) executeBulkInsert(ctx context.Context, coll string, partition string, files []string, endTime int64) error { +func (b *BackupContext) executeBulkInsert(ctx context.Context, db, coll string, partition string, files []string, endTime int64) error { log.Debug("execute bulk insert", + zap.String("db", db), zap.String("collection", coll), zap.String("partition", partition), zap.Strings("files", files), zap.Int64("endTime", endTime)) - taskId, err := b.milvusClient.BulkInsert(ctx, coll, partition, files, gomilvus.IsBackup(), gomilvus.WithEndTs(endTime)) + var taskId int64 + var err error + if endTime == 0 { + taskId, err = b.getMilvusClient().BulkInsert(ctx, db, coll, partition, files, gomilvus.IsBackup()) + } else { + taskId, err = b.getMilvusClient().BulkInsert(ctx, db, coll, partition, files, gomilvus.IsBackup(), gomilvus.WithEndTs(endTime)) + } if err != nil { log.Error("fail to bulk insert", - zap.Error(err), + zap.String("db", db), zap.String("collectionName", coll), zap.String("partitionName", partition), - zap.Strings("files", files)) + zap.Strings("files", files), + zap.Error(err)) return err } err = b.watchBulkInsertState(ctx, taskId, BULKINSERT_TIMEOUT, BULKINSERT_SLEEP_INTERVAL) @@ -429,11 +651,11 @@ func (b BackupContext) executeBulkInsert(ctx context.Context, coll string, parti return nil } -func (b BackupContext) watchBulkInsertState(ctx context.Context, taskId int64, timeout int64, sleepSeconds int) error { +func (b *BackupContext) watchBulkInsertState(ctx context.Context, taskId int64, timeout int64, sleepSeconds int) error { lastProgress := 0 lastUpdateTime := time.Now().Unix() for { - importTaskState, err := b.milvusClient.GetBulkInsertState(ctx, taskId) + importTaskState, err := b.getMilvusClient().GetBulkInsertState(ctx, taskId) currentTimestamp := time.Now().Unix() if err != nil { return err @@ -469,7 +691,7 @@ func (b BackupContext) watchBulkInsertState(ctx context.Context, taskId int64, t return errors.New("import task timeout") } -func (b BackupContext) getBackupPartitionPaths(ctx context.Context, bucketName string, backupPath string, partition *backuppb.PartitionBackupInfo) ([]string, error) { +func (b *BackupContext) getBackupPartitionPaths(ctx context.Context, bucketName string, backupPath string, partition *backuppb.PartitionBackupInfo) ([]string, error) { log.Info("getBackupPartitionPaths", zap.String("bucketName", bucketName), zap.String("backupPath", backupPath), @@ -478,7 +700,7 @@ func (b BackupContext) getBackupPartitionPaths(ctx context.Context, bucketName s insertPath := fmt.Sprintf("%s/%s/%s/%v/%v/", backupPath, BINGLOG_DIR, INSERT_LOG_DIR, partition.GetCollectionId(), partition.GetPartitionId()) deltaPath := fmt.Sprintf("%s/%s/%s/%v/%v/", 
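watchBulkInsertState polls GetBulkInsertState on an interval and times out only when no progress has been observed for the configured window. The sketch below mirrors that progress-resets-the-deadline pattern with a placeholder state-check function; it is not the exact implementation.

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitForImport polls check() until it reports done, extending the deadline
// whenever progress advances, and fails once the quiet period exceeds timeout.
func waitForImport(check func() (progress int, done bool, err error),
	interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	lastProgress := -1
	for time.Now().Before(deadline) {
		progress, done, err := check()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if progress > lastProgress {
			lastProgress = progress
			deadline = time.Now().Add(timeout) // progress resets the timeout window
		}
		time.Sleep(interval)
	}
	return errors.New("import task timeout")
}

func main() {
	calls := 0
	err := waitForImport(func() (int, bool, error) {
		calls++
		return calls * 50, calls >= 2, nil
	}, 10*time.Millisecond, time.Second)
	fmt.Println("err:", err)
}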
backupPath, BINGLOG_DIR, DELTA_LOG_DIR, partition.GetCollectionId(), partition.GetPartitionId()) - exist, err := b.storageClient.Exist(ctx, bucketName, deltaPath) + exist, err := b.getStorageClient().Exist(ctx, bucketName, deltaPath) if err != nil { log.Warn("check binlog exist fail", zap.Error(err)) return []string{}, err @@ -489,7 +711,7 @@ func (b BackupContext) getBackupPartitionPaths(ctx context.Context, bucketName s return []string{insertPath, deltaPath}, nil } -func (b BackupContext) getBackupPartitionPathsWithGroupID(ctx context.Context, bucketName string, backupPath string, partition *backuppb.PartitionBackupInfo, groupId int64) ([]string, error) { +func (b *BackupContext) getBackupPartitionPathsWithGroupID(ctx context.Context, bucketName string, backupPath string, partition *backuppb.PartitionBackupInfo, groupId int64) ([]string, error) { log.Info("getBackupPartitionPaths", zap.String("bucketName", bucketName), zap.String("backupPath", backupPath), @@ -499,7 +721,7 @@ func (b BackupContext) getBackupPartitionPathsWithGroupID(ctx context.Context, b insertPath := fmt.Sprintf("%s/%s/%s/%v/%v/%d/", backupPath, BINGLOG_DIR, INSERT_LOG_DIR, partition.GetCollectionId(), partition.GetPartitionId(), groupId) deltaPath := fmt.Sprintf("%s/%s/%s/%v/%v/%d/", backupPath, BINGLOG_DIR, DELTA_LOG_DIR, partition.GetCollectionId(), partition.GetPartitionId(), groupId) - exist, err := b.storageClient.Exist(ctx, bucketName, deltaPath) + exist, err := b.getStorageClient().Exist(ctx, bucketName, deltaPath) if err != nil { log.Warn("check binlog exist fail", zap.Error(err)) return []string{}, err diff --git a/core/backup_meta.go b/core/backup_meta.go index 1fac14d0..f2e5e925 100644 --- a/core/backup_meta.go +++ b/core/backup_meta.go @@ -14,6 +14,7 @@ const ( COLLECTION_META_FILE = "collection_meta.json" PARTITION_META_FILE = "partition_meta.json" SEGMENT_META_FILE = "segment_meta.json" + FULL_META_FILE = "full_meta.json" SEPERATOR = "/" BINGLOG_DIR = "binlogs" @@ -32,6 +33,7 @@ type BackupMetaBytes struct { CollectionMetaBytes []byte PartitionMetaBytes []byte SegmentMetaBytes []byte + FullMetaBytes []byte } type LeveledBackupInfo struct { @@ -98,6 +100,7 @@ func treeToLevel(backup *backuppb.BackupInfo) (LeveledBackupInfo, error) { Progress: backup.GetProgress(), Name: backup.GetName(), BackupTimestamp: backup.GetBackupTimestamp(), + Size: backup.GetSize(), MilvusVersion: backup.GetMilvusVersion(), } @@ -130,12 +133,17 @@ func serialize(backup *backuppb.BackupInfo) (*BackupMetaBytes, error) { if err != nil { return nil, err } + fullMetaBytes, err := json.Marshal(backup) + if err != nil { + return nil, err + } return &BackupMetaBytes{ BackupMetaBytes: backupMetaBytes, CollectionMetaBytes: collectionBackupMetaBytes, PartitionMetaBytes: partitionBackupMetaBytes, SegmentMetaBytes: segmentBackupMetaBytes, + FullMetaBytes: fullMetaBytes, }, nil } @@ -229,6 +237,10 @@ func SegmentMetaPath(backupRootPath, backupName string) string { return BackupMetaDirPath(backupRootPath, backupName) + SEPERATOR + SEGMENT_META_FILE } +func FullMetaPath(backupRootPath, backupName string) string { + return BackupMetaDirPath(backupRootPath, backupName) + SEPERATOR + FULL_META_FILE +} + func BackupBinlogDirPath(backupRootPath, backupName string) string { return backupRootPath + SEPERATOR + backupName + SEPERATOR + BINGLOG_DIR } @@ -260,7 +272,9 @@ func SimpleBackupResponse(input *backuppb.BackupInfoResponse) *backuppb.BackupIn collections = append(collections, &backuppb.CollectionBackupInfo{ StateCode: coll.GetStateCode(), 
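full_meta.json joins the existing backup, collection, partition and segment meta files, and FullMetaPath composes its location with the same rootPath/backupName/meta convention as the other helpers. A small sketch of that layout (the "meta" directory name is taken as given here; confirm it against BackupMetaDirPath in the repo):

package sketch

const (
	separator = "/"
	metaDir   = "meta" // assumed name of the meta directory under each backup

	backupMetaFile     = "backup_meta.json"
	collectionMetaFile = "collection_meta.json"
	partitionMetaFile  = "partition_meta.json"
	segmentMetaFile    = "segment_meta.json"
	fullMetaFile       = "full_meta.json" // new: one JSON dump of the whole BackupInfo tree
)

// backupMetaDirPath mirrors BackupMetaDirPath: <root>/<backupName>/meta.
func backupMetaDirPath(root, backupName string) string {
	return root + separator + backupName + separator + metaDir
}

// fullMetaPath mirrors the new FullMetaPath helper.
func fullMetaPath(root, backupName string) string {
	return backupMetaDirPath(root, backupName) + separator + fullMetaFile
}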
ErrorMessage: coll.GetErrorMessage(), + DbName: coll.GetDbName(), CollectionName: coll.GetCollectionName(), + CollectionId: coll.GetCollectionId(), BackupTimestamp: coll.GetBackupTimestamp(), HasIndex: coll.GetHasIndex(), IndexInfos: coll.GetIndexInfos(), @@ -298,12 +312,16 @@ func SimpleRestoreResponse(input *backuppb.RestoreBackupResponse) *backuppb.Rest collectionRestores := make([]*backuppb.RestoreCollectionTask, 0) for _, coll := range restore.GetCollectionRestoreTasks() { collectionRestores = append(collectionRestores, &backuppb.RestoreCollectionTask{ + Id: coll.GetId(), StateCode: coll.GetStateCode(), ErrorMessage: coll.GetErrorMessage(), StartTime: coll.GetStartTime(), EndTime: coll.GetEndTime(), Progress: coll.GetProgress(), TargetCollectionName: coll.GetTargetCollectionName(), + TargetDbName: coll.GetTargetDbName(), + ToRestoreSize: coll.GetToRestoreSize(), + RestoredSize: coll.GetRestoredSize(), }) } @@ -337,3 +355,5 @@ func UpdateRestoreBackupTask(input *backuppb.RestoreBackupTask) *backuppb.Restor } return input } + +type DbCollections = map[string][]string diff --git a/core/backup_meta_test.go b/core/backup_meta_test.go index 835db848..2119faca 100644 --- a/core/backup_meta_test.go +++ b/core/backup_meta_test.go @@ -1,9 +1,14 @@ package core import ( + "bufio" + "fmt" + "io" + "os" "strconv" "testing" + jsoniter "github.com/json-iterator/go" "github.com/stretchr/testify/assert" "github.com/zilliztech/milvus-backup/core/proto/backuppb" @@ -139,3 +144,90 @@ func TestBackupSerialize(t *testing.T) { deserBackup, err := deserialize(serData) log.Info(deserBackup.String()) } + +func TestDbCollectionJson(t *testing.T) { + dbCollection := DbCollections{"db1": []string{"coll1", "coll2"}, "db2": []string{"coll3", "coll4"}} + jsonStr, err := jsoniter.MarshalToString(dbCollection) + assert.NoError(t, err) + println(jsonStr) + + var dbCollection2 DbCollections + jsoniter.UnmarshalFromString(jsonStr, &dbCollection2) + println(dbCollection2) +} + +func readBackup(backupDir string) (*backuppb.BackupInfo, error) { + readByteFunc := func(filepath string) ([]byte, error) { + file, err := os.OpenFile(filepath, os.O_RDWR, 0666) + if err != nil { + fmt.Println("Open file error!", err) + return nil, err + } + + // Get the file size + stat, err := file.Stat() + if err != nil { + fmt.Println(err) + return nil, err + } + + bs := make([]byte, stat.Size()) + _, err = bufio.NewReader(file).Read(bs) + if err != nil && err != io.EOF { + fmt.Println(err) + return nil, err + } + return bs, nil + } + + backupPath := backupDir + "/backup_meta.json" + collectionPath := backupDir + "/collection_meta.json" + partitionPath := backupDir + "/partition_meta.json" + segmentPath := backupDir + "/segment_meta.json" + + backupMetaBytes, err := readByteFunc(backupPath) + if err != nil { + return nil, err + } + collectionBackupMetaBytes, err := readByteFunc(collectionPath) + if err != nil { + return nil, err + } + partitionBackupMetaBytes, err := readByteFunc(partitionPath) + if err != nil { + return nil, err + } + segmentBackupMetaBytes, err := readByteFunc(segmentPath) + if err != nil { + return nil, err + } + + completeBackupMetas := &BackupMetaBytes{ + BackupMetaBytes: backupMetaBytes, + CollectionMetaBytes: collectionBackupMetaBytes, + PartitionMetaBytes: partitionBackupMetaBytes, + SegmentMetaBytes: segmentBackupMetaBytes, + } + + deserBackup, err := deserialize(completeBackupMetas) + + return deserBackup, err +} + +func TestReadBackupFile(t *testing.T) { + filepath := "/tmp/hxs_meta" + + backupInfo, err := 
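DbCollections is just a map from database name to the collection names inside it, and the db_collections request fields carry it as JSON, which is exactly what the new TestDbCollectionJson exercises. A standard-library sketch of the round trip (the test itself uses jsoniter, but the shape is the same):

package sketch

import (
	"encoding/json"
	"fmt"
)

// DbCollections matches the alias added in backup_meta.go:
// database name -> collections to back up or restore.
type DbCollections = map[string][]string

func demoDbCollectionsJSON() error {
	in := DbCollections{"db1": {"coll1", "coll2"}, "db2": {}}

	raw, err := json.Marshal(in) // e.g. {"db1":["coll1","coll2"],"db2":[]}
	if err != nil {
		return err
	}

	var out DbCollections
	if err := json.Unmarshal(raw, &out); err != nil {
		return err
	}
	fmt.Printf("%s -> %d databases\n", raw, len(out))
	return nil
}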
readBackup(filepath) + assert.NoError(t, err) + + levelBackupInfo, err := treeToLevel(backupInfo) + assert.NoError(t, err) + assert.NotNil(t, levelBackupInfo) + + output, _ := serialize(backupInfo) + BackupMetaStr := string(output.BackupMetaBytes) + segmentMetaStr := string(output.SegmentMetaBytes) + fmt.Sprintf(BackupMetaStr) + fmt.Sprintf(segmentMetaStr) + //log.Info("segment meta", zap.String("value", string(output.SegmentMetaBytes))) +} diff --git a/core/backup_server.go b/core/backup_server.go index d6c268da..b402a4bb 100644 --- a/core/backup_server.go +++ b/core/backup_server.go @@ -8,6 +8,7 @@ import ( "github.com/zilliztech/milvus-backup/core/paramtable" "github.com/zilliztech/milvus-backup/core/proto/backuppb" "github.com/zilliztech/milvus-backup/internal/log" + "go.uber.org/zap" "net/http" "net/http/pprof" ) @@ -24,6 +25,8 @@ const ( API_V1_PREFIX = "/api/v1" DOCS_API = "/docs/*any" + + CHECK_API = "/check" ) // Server is the Backup Server @@ -55,7 +58,11 @@ func (s *Server) Init() { func (s *Server) Start() { s.registerProfilePort() - s.engine.Run(s.config.port) + err := s.engine.Run(s.config.port) + if err != nil { + log.Error("Failed to start server", zap.Error(err)) + panic(err) + } log.Info("Start backup server backend") } @@ -105,6 +112,7 @@ func (h *Handlers) RegisterRoutesTo(router gin.IRouter) { router.DELETE(DELETE_BACKUP_API, wrapHandler(h.handleDeleteBackup)) router.POST(RESTORE_BACKUP_API, wrapHandler(h.handleRestoreBackup)) router.GET(GET_RESTORE_API, wrapHandler(h.handleGetRestore)) + router.GET(CHECK_API, wrapHandler(h.handleCheck)) router.GET(DOCS_API, ginSwagger.WrapHandler(swaggerFiles.Handler)) } @@ -129,10 +137,13 @@ func wrapHandler(handle handlerFunc) gin.HandlerFunc { // @Success 200 {object} backuppb.BackupInfoResponse // @Router /create [post] func (h *Handlers) handleCreateBackup(c *gin.Context) (interface{}, error) { - json := backuppb.CreateBackupRequest{} - c.BindJSON(&json) - json.RequestId = c.GetHeader("request_id") - resp := h.backupContext.CreateBackup(h.backupContext.ctx, &json) + requestBody := backuppb.CreateBackupRequest{} + if err := c.ShouldBindJSON(&requestBody); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request body"}) + return nil, nil + } + requestBody.RequestId = c.GetHeader("request_id") + resp := h.backupContext.CreateBackup(h.backupContext.ctx, &requestBody) if h.backupContext.params.HTTPCfg.SimpleResponse { resp = SimpleBackupResponse(resp) } @@ -216,10 +227,15 @@ func (h *Handlers) handleDeleteBackup(c *gin.Context) (interface{}, error) { // @Success 200 {object} backuppb.RestoreBackupResponse // @Router /restore [post] func (h *Handlers) handleRestoreBackup(c *gin.Context) (interface{}, error) { - json := backuppb.RestoreBackupRequest{} - c.BindJSON(&json) - json.RequestId = c.GetHeader("request_id") - resp := h.backupContext.RestoreBackup(h.backupContext.ctx, &json) + requestBody := backuppb.RestoreBackupRequest{} + //c.BindJSON(&json) + if err := c.ShouldBindJSON(&requestBody); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request body"}) + return nil, nil + } + + requestBody.RequestId = c.GetHeader("request_id") + resp := h.backupContext.RestoreBackup(h.backupContext.ctx, &requestBody) if h.backupContext.params.HTTPCfg.SimpleResponse { resp = SimpleRestoreResponse(resp) } @@ -248,3 +264,9 @@ func (h *Handlers) handleGetRestore(c *gin.Context) (interface{}, error) { c.JSON(http.StatusOK, resp) return nil, nil } + +func (h *Handlers) handleCheck(c *gin.Context) (interface{}, 
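The handlers now bind with ShouldBindJSON and answer a malformed body with HTTP 400 instead of carrying on with a zero-valued request, which is what the old BindJSON calls silently did. The pattern in isolation, with a placeholder request type standing in for backuppb.CreateBackupRequest:

package sketch

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

// createRequest is a placeholder for the real protobuf request type.
type createRequest struct {
	RequestId  string `json:"request_id"`
	BackupName string `json:"backup_name"`
	Async      bool   `json:"async"`
}

func handleCreate(c *gin.Context) {
	var req createRequest
	// Reject malformed JSON early with a 400 rather than proceeding
	// with an empty request.
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request body"})
		return
	}
	req.RequestId = c.GetHeader("request_id")
	c.JSON(http.StatusOK, gin.H{"request_id": req.RequestId})
}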
error) { + resp := h.backupContext.Check(h.backupContext.ctx) + c.JSON(http.StatusOK, resp) + return nil, nil +} diff --git a/core/etcd_test.go b/core/etcd_test.go deleted file mode 100644 index 9a4c0d10..00000000 --- a/core/etcd_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package core - -import ( - "context" - "fmt" - "github.com/golang/protobuf/proto" - "github.com/zilliztech/milvus-backup/core/proto/backuppb" - clientv3 "go.etcd.io/etcd/client/v3" - "go.uber.org/zap" - "log" - "testing" - "time" -) - -func TestETCDList(t *testing.T) { - // 1. etcd client - // Initialize etcd client - cli, err := clientv3.New(clientv3.Config{ - Endpoints: []string{"10.102.8.127:2379"}, - //Endpoints: []string{"10.102.10.120:2379"}, - //Endpoints: []string{"10.102.10.139:2379"}, - //Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, - DialTimeout: 5 * time.Second, - }) - if err != nil { - fmt.Printf("Initialize etcd client failed. err: %v\n", err) - } - //kv := clientv3.NewKV(cli) - - ctx := context.TODO() - - opts := []clientv3.OpOption{clientv3.WithPrefix(), clientv3.WithSerializable()} - getResp, err := cli.Get(ctx, "", opts...) - - for _, kvs := range getResp.Kvs { - log.Println(zap.Any("key", string(kvs.Key)), zap.Any("value", string(kvs.Value))) - } - - //log.Println("getresp", zap.Any("resp", getResp), zap.Any("values", getResp.Kvs)) - cli.Close() -} - -func TestETCDGet(t *testing.T) { - // 1. etcd client - // Initialize etcd client - cli, err := clientv3.New(clientv3.Config{ - Endpoints: []string{"10.102.8.127:2379"}, - //Endpoints: []string{"10.102.10.120:2379"}, - //Endpoints: []string{"10.102.10.162:2379"}, - //Endpoints: []string{"10.102.10.139:2379"}, - //Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, - DialTimeout: 5 * time.Second, - }) - if err != nil { - fmt.Printf("Initialize etcd client failed. 
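handleCheck exposes the new connectivity check as a plain GET. Assuming the default server port and the /api/v1 prefix used by the other routes (both assumptions in this sketch), a client call looks like:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Host, port and the /api/v1 prefix are assumptions; confirm them
	// against the backup server configuration.
	resp, err := http.Get("http://localhost:8080/api/v1/check")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// A CheckResponse-shaped JSON body is expected, roughly {"code":0,"msg":"..."}.
	fmt.Println(resp.StatusCode, string(body))
}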
err: %v\n", err) - } - //kv := clientv3.NewKV(cli) - - ctx := context.TODO() - - //opts := []clientv3.OpOption{clientv3.WithPrefix(), clientv3.WithSerializable()} - //getResp, err := cli.Get(ctx, "by-dev/meta/datacoord-meta/binlog/437433135932575088/437433135932575089/437433135932575098/102") - getResp, err := cli.Get(ctx, "by-dev/meta/datacoord-meta/binlog/437454123484053509/437454123484053510/437454123484253594/0") - - for _, kvs := range getResp.Kvs { - log.Println(zap.Any("key", string(kvs.Key)), zap.Any("value", string(kvs.Value))) - m := &backuppb.FieldBinlog{} - proto.Unmarshal(kvs.Value, m) - log.Println(len(m.Binlogs)) - log.Println(m) - } - - //log.Println("getresp", zap.Any("resp", getResp), zap.Any("values", getResp.Kvs)) - cli.Close() -} diff --git a/core/milvus_sdk_test.go b/core/milvus_sdk_test.go index aad1b78c..ff3cf928 100644 --- a/core/milvus_sdk_test.go +++ b/core/milvus_sdk_test.go @@ -213,7 +213,7 @@ func TestCreateIndex(t *testing.T) { fmt.Println(schema) client.CreateCollection(ctx, schema, 2) - idx, err := entity.NewIndexTRIE(entity.IP) + idx := entity.NewScalarIndex() err = client.CreateIndex(ctx, _COLLECTION_NAME, _STR_FIELD_NAME, idx, false, gomilvus.WithIndexName("_default_idx_102")) fmt.Println(err) @@ -243,76 +243,22 @@ func TestDescribeIndex(t *testing.T) { fmt.Println(err) } -func TestMultiDB(t *testing.T) { +func TestCleanAll(t *testing.T) { ctx := context.Background() - milvusAddr := "localhost:19530" - //c, err := proxy.NewClient(context, milvusAddr) - //assert.NoError(t, err) + milvusAddr := "10.102.9.64:19530" c2, err := gomilvus.NewGrpcClient(ctx, milvusAddr) assert.NoError(t, err) - c2.UsingDatabase(ctx, "wanganyang") - //c2.CreateDatabase(ctx, "wanganyang") - - //_COLLECTION_NAME := "demo_bulk_insert2" - //_ID_FIELD_NAME := "id_field" - //_VECTOR_FIELD_NAME := "float_vector_field" - //_STR_FIELD_NAME := "str_field" - // - //// String field parameter - //_MAX_LENGTH := "65535" - // - //// Vector field parameter - //_DIM := "8" - - //field1 = FieldSchema(name=_ID_FIELD_NAME, dtype=DataType.INT64, description="int64", is_primary=True, auto_id=True) - //field2 = FieldSchema(name=_VECTOR_FIELD_NAME, dtype=DataType.FLOAT_VECTOR, description="float vector", dim=_DIM, - // is_primary=False) - //field3 = FieldSchema(name=_STR_FIELD_NAME, dtype=DataType.VARCHAR, description="string", - // max_length=_MAX_LENGTH, is_primary=False) - //schema = CollectionSchema(fields=[field1, field2, field3], description="collection description") - //collection = Collection(name=_COLLECTION_NAME, data=None, schema=schema) - - //field1 := &entity.Field{ - // Name: _ID_FIELD_NAME, - // DataType: entity.FieldTypeInt64, - // Description: "int64", - // PrimaryKey: true, - // AutoID: true, - //} - //field2 := &entity.Field{ - // Name: _VECTOR_FIELD_NAME, - // DataType: entity.FieldTypeFloatVector, - // Description: "float vector", - // TypeParams: map[string]string{ - // entity.TypeParamDim: _DIM, - // }, - // PrimaryKey: false, - //} - //field3 := &entity.Field{ - // Name: _STR_FIELD_NAME, - // DataType: entity.FieldTypeVarChar, - // Description: "string", - // PrimaryKey: false, - // TypeParams: map[string]string{ - // entity.TypeParamMaxLength: _MAX_LENGTH, - // }, - //} - //schema := &entity.Schema{ - // CollectionName: _COLLECTION_NAME, - // Description: "demo bulkinsert", - // AutoID: true, - // Fields: []*entity.Field{field1, field2, field3}, - //} - ////client.DropCollection(ctx, _COLLECTION_NAME) - //c2.CreateCollection(ctx, schema, 2) + dbs, err := c2.ListDatabases(ctx) - 
collections, err := c2.ListCollections(ctx) - for _, coll := range collections { - log.Info("collections", zap.Any("coll", coll.Name), zap.Int64("id", coll.ID)) + for _, db := range dbs { + c2.UsingDatabase(ctx, db.Name) + collections, _ := c2.ListCollections(ctx) + for _, coll := range collections { + c2.DropCollection(ctx, coll.Name) + log.Info("collections", zap.Any("coll", coll.Name), zap.Int64("id", coll.ID)) + } + c2.DropDatabase(ctx, db.Name) } - - dbs, err := c2.ListDatabases(ctx) - log.Info("dbs", zap.Any("dbs", dbs)) } diff --git a/core/milvus_sdk_wrapper.go b/core/milvus_sdk_wrapper.go new file mode 100644 index 00000000..84ad672f --- /dev/null +++ b/core/milvus_sdk_wrapper.go @@ -0,0 +1,171 @@ +package core + +import ( + "context" + gomilvus "github.com/milvus-io/milvus-sdk-go/v2/client" + "github.com/milvus-io/milvus-sdk-go/v2/entity" + "github.com/zilliztech/milvus-backup/internal/util/retry" + "sync" + "time" +) + +// MilvusClient wrap db into milvus API to make it thread safe +type MilvusClient struct { + mu sync.Mutex + client gomilvus.Client +} + +func (m *MilvusClient) Close() error { + return m.client.Close() +} + +func (m *MilvusClient) GetVersion(ctx context.Context) (string, error) { + return m.client.GetVersion(ctx) +} + +func (m *MilvusClient) CreateDatabase(ctx context.Context, dbName string) error { + return m.client.CreateDatabase(ctx, dbName) +} + +func (m *MilvusClient) ListDatabases(ctx context.Context) ([]entity.Database, error) { + return m.client.ListDatabases(ctx) +} + +func (m *MilvusClient) DescribeCollection(ctx context.Context, db, collName string) (*entity.Collection, error) { + m.mu.Lock() + defer m.mu.Unlock() + err := m.client.UsingDatabase(ctx, db) + if err != nil { + return nil, err + } + return m.client.DescribeCollection(ctx, collName) +} + +func (m *MilvusClient) DescribeIndex(ctx context.Context, db, collName, fieldName string) ([]entity.Index, error) { + m.mu.Lock() + defer m.mu.Unlock() + err := m.client.UsingDatabase(ctx, db) + if err != nil { + return nil, err + } + return m.client.DescribeIndex(ctx, collName, fieldName) +} + +func (m *MilvusClient) ShowPartitions(ctx context.Context, db, collName string) ([]*entity.Partition, error) { + m.mu.Lock() + defer m.mu.Unlock() + err := m.client.UsingDatabase(ctx, db) + if err != nil { + return nil, err + } + return m.client.ShowPartitions(ctx, collName) +} + +func (m *MilvusClient) GetLoadingProgress(ctx context.Context, db, collName string, partitionNames []string) (int64, error) { + m.mu.Lock() + defer m.mu.Unlock() + err := m.client.UsingDatabase(ctx, db) + if err != nil { + return 0, err + } + return m.client.GetLoadingProgress(ctx, collName, partitionNames) +} + +func (m *MilvusClient) GetPersistentSegmentInfo(ctx context.Context, db, collName string) ([]*entity.Segment, error) { + m.mu.Lock() + defer m.mu.Unlock() + err := m.client.UsingDatabase(ctx, db) + if err != nil { + return nil, err + } + return m.client.GetPersistentSegmentInfo(ctx, collName) +} + +func (m *MilvusClient) FlushV2(ctx context.Context, db, collName string, async bool) ([]int64, []int64, int64, error) { + m.mu.Lock() + defer m.mu.Unlock() + err := m.client.UsingDatabase(ctx, db) + if err != nil { + return nil, nil, 0, err + } + return m.client.FlushV2(ctx, collName, async) +} + +func (m *MilvusClient) ListCollections(ctx context.Context, db string) ([]*entity.Collection, error) { + m.mu.Lock() + defer m.mu.Unlock() + err := m.client.UsingDatabase(ctx, db) + if err != nil { + return nil, err + } + return 
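The new MilvusClient wrapper serializes every database-scoped call: take the mutex, switch the underlying SDK client with UsingDatabase, then perform the operation, so concurrent callers aimed at different databases cannot interleave the switch and the call. A reduced sketch of that pattern with a stand-in SDK interface:

package sketch

import (
	"context"
	"sync"
)

// sdkClient stands in for the small slice of the Milvus Go SDK used here.
type sdkClient interface {
	UsingDatabase(ctx context.Context, db string) error
	HasCollection(ctx context.Context, coll string) (bool, error)
}

// safeClient mirrors the wrapper's shape: one mutex guards the
// "switch database, then call" critical section.
type safeClient struct {
	mu     sync.Mutex
	client sdkClient
}

func (s *safeClient) HasCollection(ctx context.Context, db, coll string) (bool, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if err := s.client.UsingDatabase(ctx, db); err != nil {
		return false, err
	}
	return s.client.HasCollection(ctx, coll)
}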
m.client.ListCollections(ctx) +} + +func (m *MilvusClient) HasCollection(ctx context.Context, db, collName string) (bool, error) { + m.mu.Lock() + defer m.mu.Unlock() + err := m.client.UsingDatabase(ctx, db) + if err != nil { + return false, err + } + return m.client.HasCollection(ctx, collName) +} + +func (m *MilvusClient) BulkInsert(ctx context.Context, db, collName string, partitionName string, files []string, opts ...gomilvus.BulkInsertOption) (int64, error) { + m.mu.Lock() + defer m.mu.Unlock() + err := m.client.UsingDatabase(ctx, db) + if err != nil { + return 0, err + } + return m.client.BulkInsert(ctx, collName, partitionName, files, opts...) +} + +func (m *MilvusClient) GetBulkInsertState(ctx context.Context, taskID int64) (*entity.BulkInsertTaskState, error) { + return m.client.GetBulkInsertState(ctx, taskID) +} + +func (m *MilvusClient) CreateCollection(ctx context.Context, db string, schema *entity.Schema, shardsNum int32, opts ...gomilvus.CreateCollectionOption) error { + m.mu.Lock() + defer m.mu.Unlock() + err := m.client.UsingDatabase(ctx, db) + if err != nil { + return err + } + // add retry to make sure won't be block by rate control + return retry.Do(ctx, func() error { + return m.client.CreateCollection(ctx, schema, shardsNum, opts...) + }, retry.Sleep(2*time.Second), retry.Attempts(10)) +} + +func (m *MilvusClient) CreatePartition(ctx context.Context, db, collName string, partitionName string) error { + m.mu.Lock() + defer m.mu.Unlock() + err := m.client.UsingDatabase(ctx, db) + if err != nil { + return err + } + return retry.Do(ctx, func() error { + return m.client.CreatePartition(ctx, collName, partitionName) + }, retry.Sleep(2*time.Second), retry.Attempts(10)) +} + +func (m *MilvusClient) HasPartition(ctx context.Context, db, collName string, partitionName string) (bool, error) { + m.mu.Lock() + defer m.mu.Unlock() + err := m.client.UsingDatabase(ctx, db) + if err != nil { + return false, err + } + return m.client.HasPartition(ctx, collName, partitionName) +} + +func (m *MilvusClient) CreateIndex(ctx context.Context, db, collName string, fieldName string, idx entity.Index, async bool, opts ...gomilvus.IndexOption) error { + m.mu.Lock() + defer m.mu.Unlock() + err := m.client.UsingDatabase(ctx, db) + if err != nil { + return err + } + return m.client.CreateIndex(ctx, collName, fieldName, idx, async, opts...) 
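CreateCollection and CreatePartition are additionally wrapped in retry.Do with ten attempts and a two-second sleep so that Milvus rate control does not fail a restore outright. The sketch below reproduces that behaviour with only the standard library, to make the semantics explicit; the real code uses the repo's internal retry package:

package sketch

import (
	"context"
	"fmt"
	"time"
)

// retryFixed retries fn up to attempts times with a fixed sleep between
// tries and stops early if the context is cancelled. Usage would be
// retryFixed(ctx, func() error { return client.CreateCollection(...) }, 10, 2*time.Second).
func retryFixed(ctx context.Context, fn func() error, attempts int, sleep time.Duration) error {
	var last error
	for i := 0; i < attempts; i++ {
		if last = fn(); last == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(sleep):
		}
	}
	return fmt.Errorf("gave up after %d attempts: %w", attempts, last)
}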
+} diff --git a/core/milvus_source.go b/core/milvus_source.go deleted file mode 100644 index 0e1bd370..00000000 --- a/core/milvus_source.go +++ /dev/null @@ -1,21 +0,0 @@ -package core - -import "github.com/zilliztech/milvus-backup/core/paramtable" - -type MilvusSource struct { - params paramtable.BackupParams - proxyAddr string - //datacoordAddr string -} - -func (m *MilvusSource) GetProxyAddr() string { - return m.proxyAddr -} - -//func (m *MilvusSource) GetDatacoordAddr() string { -// return m.datacoordAddr -//} - -func (m *MilvusSource) GetParams() paramtable.BackupParams { - return m.params -} diff --git a/core/paramtable/base_table.go b/core/paramtable/base_table.go index d5a473b3..b73e2985 100644 --- a/core/paramtable/base_table.go +++ b/core/paramtable/base_table.go @@ -52,6 +52,15 @@ const ( DefaultMinioBackupBucketName = "a-bucket" DefaultMinioBackupRootPath = "backup" + + DefaultStorageType = "minio" + + DefaultMilvusAddress = "localhost" + DefaultMilvusPort = "19530" + DefaultMilvusAuthorizationEnabled = "false" + DefaultMilvusTlsMode = "0" + DefaultMilvusUser = "root" + DefaultMilvusPassword = "Milvus" ) var defaultYaml = DefaultBackupYaml @@ -135,6 +144,7 @@ func (gp *BaseTable) loadFromYaml(file string) { func (gp *BaseTable) tryLoadFromEnv() { gp.loadMinioConfig() + gp.loadMilvusConfig() } // Load loads an object with @key. @@ -360,7 +370,7 @@ func (gp *BaseTable) InitLogCfg() { gp.Log = log.Config{} format := gp.LoadWithDefault("log.format", "text") gp.Log.Format = format - level := gp.LoadWithDefault("log.level", "debug") + level := gp.LoadWithDefault("log.level", "info") gp.Log.Level = level gp.Log.Console = gp.ParseBool("log.console", false) gp.Log.File.Filename = gp.LoadWithDefault("log.file.rootPath", "backup.log") @@ -403,46 +413,89 @@ func (gp *BaseTable) SetLogger() { func (gp *BaseTable) loadMinioConfig() { minioAddress := os.Getenv("MINIO_ADDRESS") - if minioAddress == "" { - minioHost := gp.LoadWithDefault("minio.address", DefaultMinioAddress) - port := gp.LoadWithDefault("minio.port", DefaultMinioPort) - minioAddress = minioHost + ":" + port + if minioAddress != "" { + _ = gp.Save("minio.address", minioAddress) + } + + minioPort := os.Getenv("MINIO_PORT") + if minioPort != "" { + _ = gp.Save("minio.port", minioPort) } - gp.Save("_MinioAddress", minioAddress) minioAccessKey := os.Getenv("MINIO_ACCESS_KEY") - if minioAccessKey == "" { - minioAccessKey = gp.LoadWithDefault("minio.accessKeyID", DefaultMinioAccessKey) + if minioAccessKey != "" { + _ = gp.Save("minio.accessKeyID", minioAccessKey) } - gp.Save("_MinioAccessKeyID", minioAccessKey) minioSecretKey := os.Getenv("MINIO_SECRET_KEY") - if minioSecretKey == "" { - minioSecretKey = gp.LoadWithDefault("minio.secretAccessKey", DefaultMinioSecretAccessKey) + if minioSecretKey != "" { + _ = gp.Save("minio.secretAccessKey", minioSecretKey) } - gp.Save("_MinioSecretAccessKey", minioSecretKey) minioUseSSL := os.Getenv("MINIO_USE_SSL") - if minioUseSSL == "" { - minioUseSSL = gp.LoadWithDefault("minio.useSSL", DefaultMinioUseSSL) + if minioUseSSL != "" { + _ = gp.Save("minio.useSSL", minioUseSSL) } - gp.Save("_MinioUseSSL", minioUseSSL) minioBucketName := os.Getenv("MINIO_BUCKET_NAME") - if minioBucketName == "" { - minioBucketName = gp.LoadWithDefault("minio.bucketName", DefaultMinioBucketName) + if minioBucketName != "" { + _ = gp.Save("minio.bucketName", minioBucketName) } - gp.Save("_MinioBucketName", minioBucketName) minioUseIAM := os.Getenv("MINIO_USE_IAM") - if minioUseIAM == "" { - minioUseIAM = 
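The rewritten environment loaders invert the old precedence: each variable is written into the config table only when it is actually set, so an unset variable no longer clobbers the YAML value or the compile-time default. The same idea in isolation, with a plain map standing in for BaseTable:

package sketch

import "os"

// configTable is a stand-in for BaseTable's key/value store.
type configTable map[string]string

// overrideFromEnv copies envKey's value under cfgKey only when the variable
// is non-empty, mirroring the new loadMinioConfig/loadMilvusConfig shape.
func overrideFromEnv(t configTable, envKey, cfgKey string) {
	if v := os.Getenv(envKey); v != "" {
		t[cfgKey] = v
	}
}

func loadMilvusEnv(t configTable) {
	overrideFromEnv(t, "MILVUS_ADDRESS", "milvus.address")
	overrideFromEnv(t, "MILVUS_PORT", "milvus.port")
	overrideFromEnv(t, "MILVUS_USER", "milvus.user")
	overrideFromEnv(t, "MILVUS_PASSWORD", "milvus.password")
}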
gp.LoadWithDefault("minio.useIAM", DefaultMinioUseIAM) + if minioUseIAM != "" { + _ = gp.Save("minio.useIAM", minioUseIAM) } - gp.Save("_MinioUseIAM", minioUseIAM) minioIAMEndpoint := os.Getenv("MINIO_IAM_ENDPOINT") - if minioIAMEndpoint == "" { - minioIAMEndpoint = gp.LoadWithDefault("minio.iamEndpoint", DefaultMinioIAMEndpoint) + if minioIAMEndpoint != "" { + _ = gp.Save("minio.iamEndpoint", minioIAMEndpoint) + } + + minioRootPath := os.Getenv("MINIO_ROOT_PATH") + if minioRootPath != "" { + _ = gp.Save("minio.rootPath", minioRootPath) + } + + minioBackupBucketName := os.Getenv("MINIO_BACKUP_BUCKET_NAME") + if minioBackupBucketName != "" { + _ = gp.Save("minio.backupBucketName", minioBackupBucketName) + } + + minioBackupRootPath := os.Getenv("MINIO_BACKUP_ROOT_PATH") + if minioBackupRootPath != "" { + _ = gp.Save("minio.backupRootPath", minioBackupRootPath) + } +} + +func (gp *BaseTable) loadMilvusConfig() { + milvusAddress := os.Getenv("MILVUS_ADDRESS") + if milvusAddress != "" { + _ = gp.Save("milvus.address", milvusAddress) + } + + milvusPort := os.Getenv("MILVUS_PORT") + if milvusPort != "" { + _ = gp.Save("milvus.port", milvusPort) + } + + milvusAuthorizationEnabled := os.Getenv("MILVUS_AUTHORIZATION_ENABLED") + if milvusAuthorizationEnabled != "" { + _ = gp.Save("milvus.authorizationEnabled", milvusAuthorizationEnabled) + } + + milvusTlsMode := os.Getenv("MILVUS_TLS_MODE") + if milvusTlsMode != "" { + _ = gp.Save("milvus.tlsMode", milvusTlsMode) + } + + milvusUser := os.Getenv("MILVUS_USER") + if milvusUser != "" { + _ = gp.Save("milvus.user", milvusUser) + } + + milvusPassword := os.Getenv("MILVUS_PASSWORD") + if milvusPassword != "" { + _ = gp.Save("milvus.password", milvusPassword) } - gp.Save("_MinioIAMEndpoint", minioIAMEndpoint) } diff --git a/core/paramtable/params.go b/core/paramtable/params.go index ec7c3242..4befad13 100644 --- a/core/paramtable/params.go +++ b/core/paramtable/params.go @@ -33,12 +33,19 @@ type BackupConfig struct { Base *BaseTable MaxSegmentGroupSize int64 + + BackupParallelism int + RestoreParallelism int + BackupCopyDataParallelism int } func (p *BackupConfig) init(base *BaseTable) { p.Base = base p.initMaxSegmentGroupSize() + p.initBackupParallelism() + p.initRestoreParallelism() + p.initBackupCopyDataParallelism() } func (p *BackupConfig) initMaxSegmentGroupSize() { @@ -49,6 +56,21 @@ func (p *BackupConfig) initMaxSegmentGroupSize() { p.MaxSegmentGroupSize = size } +func (p *BackupConfig) initBackupParallelism() { + size := p.Base.ParseIntWithDefault("backup.parallelism", 1) + p.BackupParallelism = size +} + +func (p *BackupConfig) initRestoreParallelism() { + size := p.Base.ParseIntWithDefault("restore.parallelism", 1) + p.RestoreParallelism = size +} + +func (p *BackupConfig) initBackupCopyDataParallelism() { + size := p.Base.ParseIntWithDefault("backup.copydata.parallelism", 10) + p.BackupCopyDataParallelism = size +} + type MilvusConfig struct { Base *BaseTable @@ -114,13 +136,19 @@ func (p *MilvusConfig) initTLSMode() { // ///////////////////////////////////////////////////////////////////////////// // --- minio --- const ( - CloudProviderAWS = "aws" - CloudProviderGCP = "gcp" + Minio = "minio" + CloudProviderAWS = "aws" + CloudProviderGCP = "gcp" + CloudProviderAliyun = "ali" + CloudProviderAzure = "azure" ) var supportedCloudProvider = map[string]bool{ - CloudProviderAWS: true, - CloudProviderGCP: true, + Minio: true, + CloudProviderAWS: true, + CloudProviderGCP: true, + CloudProviderAliyun: true, + CloudProviderAzure: true, } type MinioConfig 
struct { @@ -139,11 +167,14 @@ type MinioConfig struct { BackupBucketName string BackupRootPath string + + StorageType string } func (p *MinioConfig) init(base *BaseTable) { p.Base = base + p.initStorageType() p.initAddress() p.initPort() p.initAccessKeyID() @@ -170,10 +201,7 @@ func (p *MinioConfig) initPort() { } func (p *MinioConfig) initAccessKeyID() { - keyID, err := p.Base.Load("minio.accessKeyID") - if err != nil { - panic(err) - } + keyID := p.Base.LoadWithDefault("minio.accessKeyID", DefaultMinioAccessKey) p.AccessKeyID = keyID } @@ -228,6 +256,15 @@ func (p *MinioConfig) initBackupRootPath() { p.BackupRootPath = rootPath } +func (p *MinioConfig) initStorageType() { + engine := p.Base.LoadWithDefault("storage.type", + p.Base.LoadWithDefault("minio.type", DefaultStorageType)) + if !supportedCloudProvider[engine] { + panic("unsupported storage type:" + engine) + } + p.StorageType = engine +} + type HTTPConfig struct { Base *BaseTable diff --git a/core/paramtable/params_test.go b/core/paramtable/params_test.go new file mode 100644 index 00000000..77adadd8 --- /dev/null +++ b/core/paramtable/params_test.go @@ -0,0 +1,15 @@ +package paramtable + +import ( + "testing" +) + +func TestRootPathParams(t *testing.T) { + var params BackupParams + params.GlobalInitWithYaml("backup.yaml") + params.Init() + + //cfg := &MinioConfig{} + //cfg.initRootPath() + println(params.MinioCfg.RootPath) +} diff --git a/core/proto/backup.proto b/core/proto/backup.proto index 00d11c27..372ca3a2 100644 --- a/core/proto/backup.proto +++ b/core/proto/backup.proto @@ -1,6 +1,8 @@ syntax = "proto3"; package milvus.proto.backup; +import "google/protobuf/struct.proto"; + option go_package="github.com/zilliztech/milvus-backup/core/proto/backuppb"; service MilvusBackupService { @@ -16,6 +18,8 @@ service MilvusBackupService { rpc RestoreBackup(RestoreBackupRequest) returns (RestoreBackupResponse) {} // Get restore state by given id rpc GetRestore(GetRestoreStateRequest) returns (RestoreBackupResponse) {} + // Check connections + rpc Check(CheckRequest) returns (CheckResponse) {} } enum ResponseCode { @@ -141,6 +145,10 @@ message CreateBackupRequest { repeated string collection_names = 3; // async or not bool async = 4; + // database and collections to backup. A json string. To support database. 2023.7.7 + google.protobuf.Value db_collections = 5; + // force backup skip flush, Should make sure data has been stored into disk when using it + bool force = 6; } /** @@ -168,6 +176,8 @@ message GetBackupRequest { string bucket_name = 4; // if bucket_name and path is set. will override bucket/path in config. string path = 5; + // if true, return simple response without too much detail to display + bool without_detail = 6; } message ListBackupsRequest { @@ -239,6 +249,12 @@ message RestoreBackupRequest { string bucket_name = 7; // if bucket_name and path is set. will override bucket/path in config. string path = 8; + // database and collections to restore. A json string. To support database. 
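initStorageType reads storage.type, falls back to the legacy minio.type key and finally to the minio default, and rejects anything outside the supported provider set. The same check reduced to its essentials (the real code panics where this sketch returns an error):

package sketch

import "fmt"

var supportedStorage = map[string]bool{
	"minio": true,
	"aws":   true,
	"gcp":   true,
	"ali":   true,
	"azure": true,
}

// resolveStorageType prefers the new key, then the legacy key, then the default.
func resolveStorageType(newKey, legacyKey string) (string, error) {
	t := newKey
	if t == "" {
		t = legacyKey
	}
	if t == "" {
		t = "minio"
	}
	if !supportedStorage[t] {
		return "", fmt.Errorf("unsupported storage type: %s", t)
	}
	return t, nil
}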
2023.7.7 + google.protobuf.Value db_collections = 9; + // if true only restore meta + bool metaOnly = 10; + // if true restore index info + bool restoreIndex = 11; } message RestorePartitionTask { @@ -263,6 +279,11 @@ message RestoreCollectionTask { int64 restored_size = 9; int64 to_restore_size = 10; int32 progress = 11; + string target_db_name = 12; + // if true only restore meta + bool metaOnly = 13; + // if true restore index info + bool restoreIndex = 14; } message RestoreBackupTask { @@ -394,4 +415,14 @@ message CollectionSchema { bool autoID = 3; // deprecated later, keep compatible with c++ part now repeated FieldSchema fields = 4; bool enable_dynamic_field = 5; // mark whether this table has the dynamic field function enabled. -} \ No newline at end of file +} + +message CheckRequest { +} + +message CheckResponse { + // response code. 0 means success. others are fail + ResponseCode code = 1; + // error msg if fail + string msg = 2; +} diff --git a/core/proto/backuppb/backup.pb.go b/core/proto/backuppb/backup.pb.go index 124e008d..3e6b3b74 100644 --- a/core/proto/backuppb/backup.pb.go +++ b/core/proto/backuppb/backup.pb.go @@ -7,6 +7,7 @@ import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" + _struct "github.com/golang/protobuf/ptypes/struct" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" @@ -945,7 +946,11 @@ type CreateBackupRequest struct { // collection names to backup, empty to backup all CollectionNames []string `protobuf:"bytes,3,rep,name=collection_names,json=collectionNames,proto3" json:"collection_names,omitempty"` // async or not - Async bool `protobuf:"varint,4,opt,name=async,proto3" json:"async,omitempty"` + Async bool `protobuf:"varint,4,opt,name=async,proto3" json:"async,omitempty"` + // database and collections to backup. A json string. To support database. 2023.7.7 + DbCollections *_struct.Value `protobuf:"bytes,5,opt,name=db_collections,json=dbCollections,proto3" json:"db_collections,omitempty"` + // force backup skip flush, Should make sure data has been stored into disk when using it + Force bool `protobuf:"varint,6,opt,name=force,proto3" json:"force,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1004,6 +1009,20 @@ func (m *CreateBackupRequest) GetAsync() bool { return false } +func (m *CreateBackupRequest) GetDbCollections() *_struct.Value { + if m != nil { + return m.DbCollections + } + return nil +} + +func (m *CreateBackupRequest) GetForce() bool { + if m != nil { + return m.Force + } + return false +} + //* // BackupInfoResponse type BackupInfoResponse struct { @@ -1083,7 +1102,9 @@ type GetBackupRequest struct { // if bucket_name and path is set. will override bucket/path in config. BucketName string `protobuf:"bytes,4,opt,name=bucket_name,json=bucketName,proto3" json:"bucket_name,omitempty"` // if bucket_name and path is set. will override bucket/path in config. 
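With db_collections, metaOnly and restoreIndex on RestoreBackupRequest, a restore can be scoped to particular databases or limited to metadata only. A sketch of posting such a request to the HTTP API; the JSON field names follow the proto declarations above and the URL is an assumption, so verify both against the generated code and the server configuration:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Field names mirror the proto (db_collections, metaOnly); treat them as
	// illustrative and confirm against the generated JSON tags.
	req := map[string]interface{}{
		"backup_name":    "my_backup",
		"db_collections": map[string][]string{"db1": {"coll1"}},
		"metaOnly":       false,
	}
	body, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}

	// Server address and route prefix are assumptions for this example.
	resp, err := http.Post("http://localhost:8080/api/v1/restore",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}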
- Path string `protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"` + Path string `protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"` + // if true, return simple response without too much detail to display + WithoutDetail bool `protobuf:"varint,6,opt,name=without_detail,json=withoutDetail,proto3" json:"without_detail,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1149,6 +1170,13 @@ func (m *GetBackupRequest) GetPath() string { return "" } +func (m *GetBackupRequest) GetWithoutDetail() bool { + if m != nil { + return m.WithoutDetail + } + return false +} + type ListBackupsRequest struct { // uuid of request, will generate one if not set RequestId string `protobuf:"bytes,1,opt,name=requestId,proto3" json:"requestId,omitempty"` @@ -1390,7 +1418,13 @@ type RestoreBackupRequest struct { // if bucket_name and path is set. will override bucket/path in config. BucketName string `protobuf:"bytes,7,opt,name=bucket_name,json=bucketName,proto3" json:"bucket_name,omitempty"` // if bucket_name and path is set. will override bucket/path in config. - Path string `protobuf:"bytes,8,opt,name=path,proto3" json:"path,omitempty"` + Path string `protobuf:"bytes,8,opt,name=path,proto3" json:"path,omitempty"` + // database and collections to restore. A json string. To support database. 2023.7.7 + DbCollections *_struct.Value `protobuf:"bytes,9,opt,name=db_collections,json=dbCollections,proto3" json:"db_collections,omitempty"` + // if true only restore meta + MetaOnly bool `protobuf:"varint,10,opt,name=metaOnly,proto3" json:"metaOnly,omitempty"` + // if true restore index info + RestoreIndex bool `protobuf:"varint,11,opt,name=restoreIndex,proto3" json:"restoreIndex,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1477,6 +1511,27 @@ func (m *RestoreBackupRequest) GetPath() string { return "" } +func (m *RestoreBackupRequest) GetDbCollections() *_struct.Value { + if m != nil { + return m.DbCollections + } + return nil +} + +func (m *RestoreBackupRequest) GetMetaOnly() bool { + if m != nil { + return m.MetaOnly + } + return false +} + +func (m *RestoreBackupRequest) GetRestoreIndex() bool { + if m != nil { + return m.RestoreIndex + } + return false +} + type RestorePartitionTask struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` StateCode RestoreTaskStateCode `protobuf:"varint,2,opt,name=state_code,json=stateCode,proto3,enum=milvus.proto.backup.RestoreTaskStateCode" json:"state_code,omitempty"` @@ -1576,9 +1631,14 @@ type RestoreCollectionTask struct { RestoredSize int64 `protobuf:"varint,9,opt,name=restored_size,json=restoredSize,proto3" json:"restored_size"` ToRestoreSize int64 `protobuf:"varint,10,opt,name=to_restore_size,json=toRestoreSize,proto3" json:"to_restore_size"` Progress int32 `protobuf:"varint,11,opt,name=progress,proto3" json:"progress,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + TargetDbName string `protobuf:"bytes,12,opt,name=target_db_name,json=targetDbName,proto3" json:"target_db_name,omitempty"` + // if true only restore meta + MetaOnly bool `protobuf:"varint,13,opt,name=metaOnly,proto3" json:"metaOnly,omitempty"` + // if true restore index info + RestoreIndex bool `protobuf:"varint,14,opt,name=restoreIndex,proto3" json:"restoreIndex,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte 
`json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *RestoreCollectionTask) Reset() { *m = RestoreCollectionTask{} } @@ -1683,6 +1743,27 @@ func (m *RestoreCollectionTask) GetProgress() int32 { return 0 } +func (m *RestoreCollectionTask) GetTargetDbName() string { + if m != nil { + return m.TargetDbName + } + return "" +} + +func (m *RestoreCollectionTask) GetMetaOnly() bool { + if m != nil { + return m.MetaOnly + } + return false +} + +func (m *RestoreCollectionTask) GetRestoreIndex() bool { + if m != nil { + return m.RestoreIndex + } + return false +} + type RestoreBackupTask struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` StateCode RestoreTaskStateCode `protobuf:"varint,2,opt,name=state_code,json=stateCode,proto3,enum=milvus.proto.backup.RestoreTaskStateCode" json:"state_code,omitempty"` @@ -2434,6 +2515,86 @@ func (m *CollectionSchema) GetEnableDynamicField() bool { return false } +type CheckRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CheckRequest) Reset() { *m = CheckRequest{} } +func (m *CheckRequest) String() string { return proto.CompactTextString(m) } +func (*CheckRequest) ProtoMessage() {} +func (*CheckRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_65240d19de191688, []int{27} +} + +func (m *CheckRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CheckRequest.Unmarshal(m, b) +} +func (m *CheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CheckRequest.Marshal(b, m, deterministic) +} +func (m *CheckRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CheckRequest.Merge(m, src) +} +func (m *CheckRequest) XXX_Size() int { + return xxx_messageInfo_CheckRequest.Size(m) +} +func (m *CheckRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CheckRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CheckRequest proto.InternalMessageInfo + +type CheckResponse struct { + // response code. 0 means success. 
others are fail + Code ResponseCode `protobuf:"varint,1,opt,name=code,proto3,enum=milvus.proto.backup.ResponseCode" json:"code,omitempty"` + // error msg if fail + Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CheckResponse) Reset() { *m = CheckResponse{} } +func (m *CheckResponse) String() string { return proto.CompactTextString(m) } +func (*CheckResponse) ProtoMessage() {} +func (*CheckResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_65240d19de191688, []int{28} +} + +func (m *CheckResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CheckResponse.Unmarshal(m, b) +} +func (m *CheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CheckResponse.Marshal(b, m, deterministic) +} +func (m *CheckResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CheckResponse.Merge(m, src) +} +func (m *CheckResponse) XXX_Size() int { + return xxx_messageInfo_CheckResponse.Size(m) +} +func (m *CheckResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CheckResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CheckResponse proto.InternalMessageInfo + +func (m *CheckResponse) GetCode() ResponseCode { + if m != nil { + return m.Code + } + return ResponseCode_Success +} + +func (m *CheckResponse) GetMsg() string { + if m != nil { + return m.Msg + } + return "" +} + func init() { proto.RegisterEnum("milvus.proto.backup.ResponseCode", ResponseCode_name, ResponseCode_value) proto.RegisterEnum("milvus.proto.backup.BackupTaskStateCode", BackupTaskStateCode_name, BackupTaskStateCode_value) @@ -2470,169 +2631,183 @@ func init() { proto.RegisterType((*ValueField)(nil), "milvus.proto.backup.ValueField") proto.RegisterType((*FieldSchema)(nil), "milvus.proto.backup.FieldSchema") proto.RegisterType((*CollectionSchema)(nil), "milvus.proto.backup.CollectionSchema") + proto.RegisterType((*CheckRequest)(nil), "milvus.proto.backup.CheckRequest") + proto.RegisterType((*CheckResponse)(nil), "milvus.proto.backup.CheckResponse") } func init() { proto.RegisterFile("backup.proto", fileDescriptor_65240d19de191688) } var fileDescriptor_65240d19de191688 = []byte{ - // 2507 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0xcd, 0x73, 0x23, 0x47, - 0x15, 0xf7, 0x48, 0xb2, 0xa4, 0x79, 0x23, 0xdb, 0xe3, 0xb6, 0xe3, 0x68, 0xbd, 0x6c, 0xd6, 0x2b, - 0x48, 0xe2, 0x75, 0x0a, 0x2f, 0x38, 0x1f, 0x24, 0x5b, 0x90, 0x0f, 0x7f, 0xed, 0x8a, 0xdd, 0x78, - 0x5d, 0x23, 0xaf, 0x2b, 0x15, 0x3e, 0xa6, 0x46, 0x9a, 0xb6, 0x3c, 0xec, 0x68, 0x5a, 0x4c, 0xb7, - 0x9c, 0x68, 0xab, 0xe0, 0x46, 0x15, 0x47, 0x0e, 0x70, 0xa1, 0xe0, 0xc4, 0x89, 0x13, 0x50, 0x14, - 0x17, 0xfe, 0x06, 0x2e, 0xfc, 0x15, 0x14, 0x7f, 0x41, 0xae, 0x54, 0xbf, 0xee, 0x19, 0x8d, 0xe4, - 0xb1, 0x2d, 0x57, 0xa5, 0x58, 0xc2, 0xad, 0xe7, 0xd7, 0xef, 0xbd, 0xee, 0x7e, 0x5f, 0xfd, 0xfa, - 0x0d, 0xd4, 0xda, 0x5e, 0xe7, 0xd9, 0xa0, 0xbf, 0xd9, 0x8f, 0x99, 0x60, 0x64, 0xa9, 0x17, 0x84, - 0x67, 0x03, 0xae, 0xbe, 0x36, 0xd5, 0x54, 0xe3, 0x5f, 0x06, 0x98, 0xcd, 0xc8, 0xa7, 0x9f, 0x37, - 0xa3, 0x13, 0x46, 0x6e, 0x01, 0x9c, 0x04, 0x34, 0xf4, 0xdd, 0xc8, 0xeb, 0xd1, 0xba, 0xb1, 0x66, - 0xac, 0x9b, 0x8e, 0x89, 0xc8, 0x81, 0xd7, 0xa3, 0x72, 0x3a, 0x90, 0xb4, 0x6a, 0xba, 0xa0, 0xa6, - 0x11, 0x19, 0x9f, 0x16, 0xc3, 0x3e, 0xad, 0x17, 0x33, 0xd3, 0x47, 0xc3, 0x3e, 0x25, 0xdb, 0x50, - 0xee, 0x7b, 0xb1, 0xd7, 0xe3, 0xf5, 0xd2, 
0x5a, 0x71, 0xdd, 0xda, 0xda, 0xd8, 0xcc, 0xd9, 0xd0, - 0x66, 0xba, 0x99, 0xcd, 0x43, 0x24, 0xde, 0x8b, 0x44, 0x3c, 0x74, 0x34, 0xe7, 0xea, 0x7b, 0x60, - 0x65, 0x60, 0x62, 0x43, 0xf1, 0x19, 0x1d, 0xea, 0x8d, 0xca, 0x21, 0x59, 0x86, 0xd9, 0x33, 0x2f, - 0x1c, 0x24, 0xbb, 0x53, 0x1f, 0xf7, 0x0b, 0xef, 0x1a, 0x8d, 0x7f, 0x96, 0x61, 0x79, 0x87, 0x85, - 0x21, 0xed, 0x88, 0x80, 0x45, 0xdb, 0xb8, 0x1a, 0x1e, 0x7a, 0x1e, 0x0a, 0x81, 0xaf, 0x65, 0x14, - 0x02, 0x9f, 0x3c, 0x00, 0xe0, 0xc2, 0x13, 0xd4, 0xed, 0x30, 0x5f, 0xc9, 0x99, 0xdf, 0x5a, 0xcf, - 0xdd, 0xab, 0x12, 0x72, 0xe4, 0xf1, 0x67, 0x2d, 0xc9, 0xb0, 0xc3, 0x7c, 0xea, 0x98, 0x3c, 0x19, - 0x92, 0x06, 0xd4, 0x68, 0x1c, 0xb3, 0xf8, 0x63, 0xca, 0xb9, 0xd7, 0x4d, 0x34, 0x32, 0x86, 0x49, - 0x9d, 0x71, 0xe1, 0xc5, 0xc2, 0x15, 0x41, 0x8f, 0xd6, 0x4b, 0x6b, 0xc6, 0x7a, 0x11, 0x45, 0xc4, - 0xe2, 0x28, 0xe8, 0x51, 0x72, 0x03, 0xaa, 0x34, 0xf2, 0xd5, 0xe4, 0x2c, 0x4e, 0x56, 0x68, 0xe4, - 0xe3, 0xd4, 0x2a, 0x54, 0xfb, 0x31, 0xeb, 0xc6, 0x94, 0xf3, 0x7a, 0x79, 0xcd, 0x58, 0x9f, 0x75, - 0xd2, 0x6f, 0xf2, 0x75, 0x98, 0xeb, 0xa4, 0x47, 0x75, 0x03, 0xbf, 0x5e, 0x41, 0xde, 0xda, 0x08, - 0x6c, 0xfa, 0xe4, 0x65, 0xa8, 0xf8, 0x6d, 0x65, 0xca, 0x2a, 0xee, 0xac, 0xec, 0xb7, 0xd1, 0x8e, - 0xaf, 0xc3, 0x42, 0x86, 0x1b, 0x09, 0x4c, 0x24, 0x98, 0x1f, 0xc1, 0x48, 0xf8, 0x3d, 0x28, 0xf3, - 0xce, 0x29, 0xed, 0x79, 0x75, 0x58, 0x33, 0xd6, 0xad, 0xad, 0x57, 0x73, 0xb5, 0x34, 0x52, 0x7a, - 0x0b, 0x89, 0x1d, 0xcd, 0x84, 0x67, 0x3f, 0xf5, 0x62, 0x9f, 0xbb, 0xd1, 0xa0, 0x57, 0xb7, 0xf0, - 0x0c, 0xa6, 0x42, 0x0e, 0x06, 0x3d, 0xe2, 0xc0, 0x62, 0x87, 0x45, 0x3c, 0xe0, 0x82, 0x46, 0x9d, - 0xa1, 0x1b, 0xd2, 0x33, 0x1a, 0xd6, 0x6b, 0x68, 0x8e, 0x8b, 0x16, 0x4a, 0xa9, 0x1f, 0x4b, 0x62, - 0xc7, 0xee, 0x4c, 0x20, 0xe4, 0x29, 0x2c, 0xf6, 0xbd, 0x58, 0x04, 0x78, 0x32, 0xc5, 0xc6, 0xeb, - 0x73, 0xe8, 0x8e, 0xf9, 0x26, 0x3e, 0x4c, 0xa8, 0x47, 0x0e, 0xe3, 0xd8, 0xfd, 0x71, 0x90, 0x93, - 0xbb, 0x60, 0x2b, 0x7a, 0xb4, 0x14, 0x17, 0x5e, 0xaf, 0x5f, 0x9f, 0x5f, 0x33, 0xd6, 0x4b, 0xce, - 0x82, 0xc2, 0x8f, 0x12, 0x98, 0x10, 0x28, 0xf1, 0xe0, 0x39, 0xad, 0x2f, 0xa0, 0x45, 0x70, 0x4c, - 0x6e, 0x82, 0x79, 0xea, 0x71, 0x17, 0x43, 0xa5, 0x6e, 0xaf, 0x19, 0xeb, 0x55, 0xa7, 0x7a, 0xea, - 0x71, 0x0c, 0x05, 0xf2, 0x01, 0x58, 0x2a, 0xaa, 0x82, 0xe8, 0x84, 0xf1, 0xfa, 0x22, 0x6e, 0xf6, - 0x95, 0xcb, 0x63, 0xc7, 0x51, 0x81, 0x28, 0x87, 0x5c, 0xaa, 0x39, 0x64, 0x9e, 0xef, 0xa2, 0x63, - 0xd6, 0x89, 0x0a, 0x4b, 0x89, 0xa0, 0xd3, 0x92, 0xfb, 0x70, 0x43, 0xef, 0xbd, 0x7f, 0x3a, 0xe4, - 0x41, 0xc7, 0x0b, 0x33, 0x87, 0x58, 0xc2, 0x43, 0xbc, 0xac, 0x08, 0x0e, 0xf5, 0x7c, 0x7a, 0x98, - 0xc6, 0x2f, 0x0b, 0xb0, 0x94, 0xa3, 0x21, 0x72, 0x07, 0x6a, 0x23, 0x35, 0xeb, 0xe0, 0x2a, 0x3a, - 0x56, 0x8a, 0x35, 0x7d, 0xf2, 0x2a, 0xcc, 0x8f, 0x48, 0x32, 0xf9, 0x64, 0x2e, 0x45, 0xd1, 0xc5, - 0xce, 0x79, 0x72, 0x31, 0xc7, 0x93, 0x9f, 0xc0, 0x02, 0xa7, 0xdd, 0x1e, 0x8d, 0x44, 0x6a, 0x53, - 0x95, 0x62, 0x5e, 0xcb, 0x55, 0x53, 0x4b, 0xd1, 0x66, 0x2c, 0x3a, 0xcf, 0xb3, 0x10, 0x4f, 0x8d, - 0x34, 0x9b, 0x31, 0xd2, 0xb8, 0x1a, 0xcb, 0x13, 0x6a, 0x6c, 0xfc, 0xa2, 0x08, 0x8b, 0xe7, 0x04, - 0xa3, 0x8b, 0xeb, 0x9d, 0xa5, 0x6a, 0x30, 0x35, 0xd2, 0xf4, 0xcf, 0x9f, 0xae, 0x90, 0x73, 0xba, - 0x49, 0x65, 0x16, 0xcf, 0x2b, 0xf3, 0x15, 0xb0, 0xa2, 0x41, 0xcf, 0x65, 0x27, 0x6e, 0xcc, 0x3e, - 0xe3, 0x49, 0x1a, 0x89, 0x06, 0xbd, 0x27, 0x27, 0x0e, 0xfb, 0x8c, 0x93, 0xfb, 0x50, 0x69, 0x07, - 0x51, 0xc8, 0xba, 0xbc, 0x3e, 0x8b, 0x8a, 0x59, 0xcb, 0x55, 0xcc, 0xbe, 0xcc, 0xf4, 0xdb, 0x48, - 0xe8, 0x24, 0x0c, 0xe4, 0x7d, 0xc0, 0x94, 0xc6, 0x91, 0xbb, 0x3c, 
0x25, 0xf7, 0x88, 0x45, 0xf2, - 0xfb, 0x34, 0x14, 0x1e, 0xf2, 0x57, 0xa6, 0xe5, 0x4f, 0x59, 0x52, 0x5b, 0x54, 0x33, 0xb6, 0xb8, - 0x01, 0xd5, 0x6e, 0xcc, 0x06, 0x7d, 0xa9, 0x0e, 0x53, 0xa5, 0x45, 0xfc, 0x6e, 0xfa, 0x8d, 0xbf, - 0x16, 0x01, 0xfe, 0xbf, 0x93, 0x3b, 0x81, 0x12, 0xc6, 0x4b, 0x05, 0x57, 0xc4, 0x71, 0x6e, 0x02, - 0xaa, 0xe6, 0x27, 0xa0, 0x4f, 0x80, 0x64, 0x7c, 0x2e, 0x89, 0x17, 0x13, 0x0d, 0x73, 0xf7, 0x8a, - 0x04, 0x9e, 0x09, 0x99, 0xc5, 0xce, 0x04, 0x3a, 0xb2, 0x14, 0x64, 0x2c, 0xf5, 0x2a, 0xcc, 0x2b, - 0x91, 0xee, 0x19, 0x8d, 0x79, 0xc0, 0x22, 0xcc, 0xf3, 0xa6, 0x33, 0xa7, 0xd0, 0x63, 0x05, 0x36, - 0x7e, 0x08, 0x37, 0x46, 0xab, 0x60, 0xaa, 0xce, 0xd8, 0xf0, 0x03, 0x98, 0x55, 0xb9, 0xcf, 0xb8, - 0xee, 0x26, 0x15, 0x5f, 0xe3, 0x53, 0xa8, 0xa7, 0x59, 0x6a, 0x52, 0xf8, 0xfb, 0xe3, 0xc2, 0xa7, - 0xbf, 0x05, 0xb4, 0xec, 0x63, 0x58, 0xd1, 0x61, 0x3f, 0x29, 0xf9, 0xbb, 0xe3, 0x92, 0xa7, 0xcd, - 0x45, 0x5a, 0xee, 0x6f, 0x0c, 0x58, 0xda, 0x89, 0xa9, 0x27, 0xa8, 0x9a, 0x73, 0xe8, 0x4f, 0x07, - 0x94, 0x0b, 0xf2, 0x35, 0x30, 0x63, 0x35, 0x6c, 0x26, 0x7e, 0x3d, 0x02, 0xc8, 0x6d, 0xb0, 0xb4, - 0x1f, 0x64, 0x52, 0x2a, 0x28, 0xe8, 0x40, 0x3b, 0xca, 0xc4, 0xdd, 0xce, 0xeb, 0xc5, 0xb5, 0xe2, - 0xba, 0xe9, 0x2c, 0x8c, 0x5f, 0xee, 0x5c, 0x96, 0x52, 0x1e, 0x1f, 0x46, 0x1d, 0x74, 0xdc, 0xaa, - 0xa3, 0x3e, 0x1a, 0x7f, 0x36, 0x80, 0x64, 0x76, 0x4b, 0x79, 0x9f, 0x45, 0x9c, 0x5e, 0xb1, 0xad, - 0xb7, 0xa1, 0x94, 0x89, 0xb7, 0x3b, 0xb9, 0x9a, 0x48, 0x44, 0x61, 0xa0, 0x21, 0xb9, 0x2c, 0xef, - 0x7a, 0xbc, 0xab, 0x43, 0x4b, 0x0e, 0xc9, 0x9b, 0x50, 0xf2, 0x3d, 0xe1, 0xe1, 0x96, 0xac, 0xad, - 0xdb, 0x97, 0x04, 0x2e, 0xee, 0x0e, 0x89, 0x1b, 0x7f, 0x30, 0xc0, 0x7e, 0x40, 0xc5, 0x97, 0xaa, - 0xc7, 0x9b, 0x60, 0x6a, 0x02, 0x9d, 0x91, 0x4d, 0xa7, 0xaa, 0x00, 0xcd, 0x3d, 0xe8, 0x3c, 0xa3, - 0x42, 0x71, 0x97, 0x34, 0x37, 0x42, 0xc8, 0x4d, 0xa0, 0xd4, 0xf7, 0xc4, 0x29, 0x46, 0xbd, 0xe9, - 0xe0, 0xb8, 0xf1, 0x03, 0x20, 0x8f, 0x03, 0x9e, 0x5c, 0x41, 0xd3, 0x6d, 0x33, 0xa7, 0x52, 0x2b, - 0xe4, 0x55, 0x6a, 0x8d, 0xbf, 0x18, 0xb0, 0x34, 0x26, 0xfd, 0x45, 0x99, 0xad, 0x38, 0xbd, 0xd9, - 0x8e, 0x60, 0x69, 0x97, 0x86, 0xf4, 0xcb, 0x0d, 0x80, 0xc6, 0xcf, 0x60, 0x79, 0x5c, 0xea, 0x7f, - 0x55, 0x13, 0x8d, 0x3f, 0x15, 0x61, 0xd9, 0xa1, 0x5c, 0xb0, 0xf8, 0x85, 0xc5, 0xf5, 0x1b, 0x90, - 0xc9, 0xdd, 0x2e, 0x1f, 0x9c, 0x9c, 0x04, 0x9f, 0x6b, 0x1f, 0xcd, 0xc8, 0x68, 0x21, 0x4e, 0xd8, - 0xd8, 0x6d, 0x11, 0x53, 0x25, 0x59, 0x15, 0x11, 0x1f, 0x5e, 0xa4, 0x86, 0x73, 0xa7, 0xcb, 0x64, - 0x67, 0x47, 0x89, 0x50, 0xcf, 0xba, 0xcc, 0x46, 0x34, 0x3e, 0xca, 0x3a, 0xe5, 0x4c, 0xd6, 0x99, - 0x8c, 0xa8, 0xca, 0x85, 0x11, 0x55, 0x1d, 0x45, 0xd4, 0xea, 0x2e, 0xac, 0xe4, 0xaf, 0x7b, 0xad, - 0x77, 0xe3, 0xdf, 0x0a, 0xa9, 0xc5, 0xd2, 0x6b, 0x40, 0x56, 0x05, 0xe7, 0x4a, 0x8b, 0x87, 0x39, - 0xa5, 0xc5, 0xdd, 0xcb, 0x54, 0xf4, 0x3f, 0x58, 0x5b, 0x34, 0x01, 0xeb, 0x4a, 0x5d, 0x16, 0xa0, - 0x9e, 0xaf, 0x73, 0x27, 0x82, 0x64, 0x56, 0xdf, 0x8d, 0xdf, 0x97, 0xe0, 0x25, 0x7d, 0xd0, 0x91, - 0x15, 0xbe, 0xd2, 0x8a, 0xfb, 0x3e, 0x58, 0xd2, 0x5f, 0x13, 0xe5, 0x94, 0x51, 0x39, 0xd7, 0xa8, - 0x46, 0x40, 0x72, 0xab, 0x6f, 0xf2, 0x16, 0xac, 0x08, 0x2f, 0xee, 0x52, 0xe1, 0x4e, 0x26, 0x70, - 0xe5, 0xdb, 0xcb, 0x6a, 0x76, 0x67, 0xfc, 0xc1, 0xed, 0xc1, 0xcb, 0xa3, 0xa7, 0x40, 0xac, 0x94, - 0xe1, 0x0a, 0x8f, 0x3f, 0xe3, 0xf5, 0xea, 0x25, 0xb5, 0x51, 0x9e, 0xfb, 0x3a, 0x2f, 0xa5, 0x92, - 0x32, 0x5a, 0xc5, 0xd6, 0x81, 0x16, 0xec, 0xbb, 0x58, 0xcd, 0xa9, 0xfa, 0xba, 0x96, 0x80, 0x2d, - 0x59, 0xd5, 0xbd, 0x06, 0x0b, 0x82, 0xa5, 0x1b, 0xc8, 0x14, 0x7d, 0x73, 0x82, 0x69, 0x69, 
0x48, - 0x97, 0x75, 0x35, 0x6b, 0xdc, 0xd5, 0x1a, 0xbf, 0x2d, 0xc2, 0xe2, 0x58, 0xae, 0xf8, 0x4a, 0xfb, - 0x86, 0x0f, 0xf5, 0xb1, 0x3c, 0x99, 0x35, 0x4d, 0xf9, 0x92, 0x76, 0x57, 0x6e, 0x84, 0x38, 0x2b, - 0xd9, 0xbc, 0x78, 0x99, 0x71, 0x2a, 0xd3, 0x19, 0xa7, 0x7a, 0x95, 0x71, 0xcc, 0x09, 0xe3, 0xfc, - 0xdd, 0x48, 0x83, 0xf7, 0x85, 0xdc, 0x93, 0xe4, 0xfe, 0x58, 0xa1, 0xf7, 0xda, 0xd5, 0x37, 0x0d, - 0xea, 0x4d, 0x15, 0x0e, 0xfb, 0xb0, 0xf2, 0x80, 0x8a, 0xe4, 0xa8, 0xd2, 0x01, 0xa6, 0xbb, 0x64, - 0x95, 0xef, 0x15, 0x12, 0xdf, 0x6b, 0xfc, 0x18, 0xac, 0xcc, 0x9b, 0x94, 0xd4, 0xa1, 0x82, 0xad, - 0xd0, 0xe6, 0xae, 0x7e, 0xc8, 0x27, 0x9f, 0xe4, 0xed, 0xd1, 0xf3, 0xba, 0x80, 0xb6, 0xbe, 0x99, - 0x5f, 0xe1, 0x8c, 0xbf, 0xac, 0x1b, 0x7f, 0x34, 0xa0, 0xac, 0x65, 0xdf, 0x06, 0x8b, 0x46, 0x22, - 0x0e, 0xa8, 0xea, 0x85, 0x29, 0xf9, 0xa0, 0xa1, 0x83, 0x41, 0x4f, 0xbe, 0xa3, 0xd2, 0x97, 0x9d, - 0x7b, 0x12, 0xb3, 0x1e, 0xee, 0xb3, 0xe4, 0xcc, 0xa5, 0xe8, 0x7e, 0xcc, 0x7a, 0xe4, 0x0e, 0xd4, - 0x46, 0x64, 0x82, 0xa1, 0x46, 0x4b, 0x8e, 0x95, 0x62, 0x47, 0x4c, 0x3a, 0x71, 0xc8, 0xba, 0x2e, - 0xde, 0x96, 0xea, 0xd6, 0xaf, 0x84, 0xac, 0x7b, 0xe8, 0x89, 0xd3, 0x64, 0x2a, 0xd3, 0xfa, 0x90, - 0x53, 0xd2, 0x59, 0x1a, 0xef, 0x40, 0xed, 0x11, 0x1d, 0x1e, 0xcb, 0x5b, 0xf1, 0xd0, 0x0b, 0xe2, - 0x69, 0x6f, 0xd0, 0xc6, 0x17, 0x06, 0x00, 0x72, 0xa1, 0x26, 0xc9, 0x2d, 0x30, 0xdb, 0x8c, 0x85, - 0x2e, 0xda, 0x56, 0x32, 0x57, 0x1f, 0xce, 0x38, 0x55, 0x09, 0xed, 0x7a, 0xc2, 0x23, 0x37, 0xa1, - 0x1a, 0x44, 0x42, 0xcd, 0x4a, 0x31, 0xb3, 0x0f, 0x67, 0x9c, 0x4a, 0x10, 0x09, 0x9c, 0xbc, 0x05, - 0x66, 0xc8, 0xa2, 0xae, 0x9a, 0xc5, 0x26, 0x88, 0xe4, 0x95, 0x10, 0x4e, 0xdf, 0x06, 0x38, 0x09, - 0x99, 0xa7, 0xb9, 0xe5, 0xc9, 0x0a, 0x0f, 0x67, 0x1c, 0x13, 0x31, 0x24, 0xb8, 0x03, 0x96, 0xcf, - 0x06, 0xed, 0x90, 0x2a, 0x0a, 0x79, 0x40, 0xe3, 0xe1, 0x8c, 0x03, 0x0a, 0x4c, 0x48, 0xb8, 0x88, - 0x83, 0x64, 0x11, 0x6c, 0xf2, 0x48, 0x12, 0x05, 0x26, 0xcb, 0xb4, 0x87, 0x82, 0x72, 0x45, 0x21, - 0xe3, 0xaf, 0x26, 0x97, 0x41, 0x4c, 0x12, 0x6c, 0x97, 0x95, 0xe7, 0x36, 0xfe, 0x5d, 0xd2, 0xee, - 0xa3, 0xba, 0x9e, 0x97, 0xb8, 0x4f, 0xf2, 0xa0, 0x2f, 0x64, 0x1e, 0xf4, 0xdf, 0x80, 0xf9, 0x80, - 0xbb, 0xfd, 0x38, 0xe8, 0x79, 0xf1, 0xd0, 0x95, 0xaa, 0x2e, 0x62, 0x3d, 0x54, 0x0b, 0xf8, 0xa1, - 0x02, 0x1f, 0xd1, 0x21, 0x59, 0x03, 0xcb, 0xa7, 0xbc, 0x13, 0x07, 0x7d, 0x99, 0x2a, 0xb4, 0x39, - 0xb3, 0x10, 0xb9, 0x0f, 0xa6, 0xdc, 0x8d, 0x6a, 0xc9, 0xcf, 0x62, 0x54, 0xde, 0xca, 0x75, 0x4e, - 0xb9, 0xf7, 0xa3, 0x61, 0x9f, 0x3a, 0x55, 0x5f, 0x8f, 0xc8, 0x36, 0x58, 0x92, 0xcd, 0xd5, 0x5d, - 0x7b, 0x95, 0xc6, 0xf2, 0x63, 0x3a, 0xeb, 0x1b, 0x0e, 0x48, 0x2e, 0xd5, 0xa6, 0x27, 0xbb, 0x50, - 0x53, 0xdd, 0x4b, 0x2d, 0xa4, 0x32, 0xad, 0x10, 0xd5, 0xf4, 0xd4, 0x52, 0x56, 0xa0, 0xec, 0x0d, - 0x04, 0x6b, 0xee, 0x62, 0x26, 0xab, 0x3a, 0xfa, 0x8b, 0xbc, 0x0d, 0xb3, 0xaa, 0x1d, 0x67, 0xe2, - 0xc9, 0x6e, 0x5f, 0xdc, 0x57, 0x52, 0x69, 0x40, 0x51, 0x93, 0x0f, 0xa1, 0x46, 0x43, 0x8a, 0x5d, - 0x39, 0xd4, 0x0b, 0x4c, 0xa3, 0x17, 0x4b, 0xb3, 0xa0, 0x6a, 0x76, 0x61, 0xce, 0xa7, 0x27, 0xde, - 0x20, 0x14, 0xae, 0x72, 0x7a, 0xeb, 0x92, 0x07, 0xe9, 0xc8, 0xff, 0x9d, 0x9a, 0xe6, 0x42, 0x08, - 0x7f, 0x98, 0x70, 0xd7, 0x1f, 0x46, 0x5e, 0x2f, 0xe8, 0x60, 0x6b, 0xbb, 0xea, 0x98, 0x01, 0xdf, - 0x55, 0x00, 0x59, 0x07, 0x5b, 0xfa, 0x40, 0x7a, 0xe1, 0x4b, 0x2f, 0x98, 0x43, 0xa2, 0xf9, 0x80, - 0xa7, 0x97, 0xf9, 0x23, 0x3a, 0x6c, 0xfc, 0xc3, 0x00, 0x7b, 0xb2, 0xcd, 0x9e, 0xba, 0x95, 0x91, - 0x71, 0xab, 0x09, 0x87, 0x29, 0x9c, 0x77, 0x98, 0x91, 0xaa, 0x8b, 0x63, 0xaa, 0x7e, 0x17, 0xca, - 0xe8, 0xaf, 0x49, 
0x6b, 0xf5, 0x92, 0x1e, 0x5e, 0xd2, 0xe6, 0x57, 0xf4, 0xe4, 0x5b, 0xb0, 0x4c, - 0x23, 0x0f, 0xe3, 0x4e, 0x1d, 0xcc, 0xc5, 0x09, 0xf4, 0xc6, 0xaa, 0x43, 0xd4, 0x9c, 0x3e, 0x33, - 0xf2, 0x6f, 0xfc, 0x1c, 0x6a, 0xd9, 0x4b, 0x82, 0x58, 0x50, 0x69, 0x0d, 0x3a, 0x1d, 0xca, 0xb9, - 0x3d, 0x43, 0x16, 0xc0, 0x3a, 0x60, 0xc2, 0x6d, 0x0d, 0xfa, 0x7d, 0x16, 0x0b, 0xdb, 0x20, 0x8b, - 0x30, 0x77, 0xc0, 0xdc, 0x43, 0x1a, 0xf7, 0x02, 0xce, 0x03, 0x16, 0xd9, 0x05, 0x52, 0x85, 0xd2, - 0xbe, 0x17, 0x84, 0x76, 0x91, 0x2c, 0xc3, 0x02, 0xfa, 0x10, 0x15, 0x34, 0x76, 0xf7, 0xe4, 0x5d, - 0x6f, 0xff, 0xaa, 0x48, 0x6e, 0x41, 0x5d, 0x5f, 0x09, 0xee, 0x93, 0xf6, 0x4f, 0x68, 0x47, 0xb8, - 0x52, 0xe4, 0x3e, 0x1b, 0x44, 0xbe, 0xfd, 0xeb, 0xe2, 0xc6, 0xe7, 0xb0, 0x94, 0xd3, 0xfd, 0x23, - 0x04, 0xe6, 0xb7, 0x3f, 0xda, 0x79, 0xf4, 0xf4, 0xd0, 0x6d, 0x1e, 0x34, 0x8f, 0x9a, 0x1f, 0x3d, - 0xb6, 0x67, 0xc8, 0x32, 0xd8, 0x1a, 0xdb, 0xfb, 0x64, 0x6f, 0xe7, 0xe9, 0x51, 0xf3, 0xe0, 0x81, - 0x6d, 0x64, 0x28, 0x5b, 0x4f, 0x77, 0x76, 0xf6, 0x5a, 0x2d, 0xbb, 0x20, 0xf7, 0xad, 0xb1, 0xfd, - 0x8f, 0x9a, 0x8f, 0xed, 0x62, 0x86, 0xe8, 0xa8, 0xf9, 0xf1, 0xde, 0x93, 0xa7, 0x47, 0x76, 0x69, - 0xe3, 0x38, 0x7d, 0x6b, 0x8c, 0x2f, 0x6d, 0x41, 0x65, 0xb4, 0xe6, 0x1c, 0x98, 0xd9, 0xc5, 0xa4, - 0x76, 0xd2, 0x55, 0xe4, 0xc9, 0x95, 0x78, 0x0b, 0x2a, 0x23, 0xb9, 0x9f, 0x48, 0xff, 0x98, 0xf8, - 0x17, 0x02, 0x50, 0x6e, 0x89, 0x98, 0x45, 0x5d, 0x7b, 0x06, 0x65, 0x50, 0xa5, 0x3d, 0x14, 0xb8, - 0x2d, 0x55, 0x41, 0x7d, 0xbb, 0x40, 0xe6, 0x01, 0xf6, 0xce, 0x68, 0x24, 0x06, 0x5e, 0x18, 0x0e, - 0xed, 0xa2, 0xfc, 0xde, 0x19, 0x70, 0xc1, 0x7a, 0xc1, 0x73, 0xea, 0xdb, 0xa5, 0x8d, 0xdf, 0x19, - 0x50, 0x4d, 0x62, 0x44, 0xae, 0x7e, 0xc0, 0x22, 0x6a, 0xcf, 0xc8, 0xd1, 0x36, 0x63, 0xa1, 0x6d, - 0xc8, 0x51, 0x33, 0x12, 0xef, 0xda, 0x05, 0x62, 0xc2, 0x6c, 0x33, 0x12, 0xdf, 0x7e, 0xc7, 0x2e, - 0xea, 0xe1, 0x9b, 0x5b, 0x76, 0x49, 0x0f, 0xdf, 0x79, 0xcb, 0x9e, 0x95, 0xc3, 0x7d, 0x99, 0xae, - 0x6d, 0x90, 0x9b, 0xdb, 0xc5, 0xbc, 0x6c, 0x5b, 0x7a, 0xa3, 0x41, 0xd4, 0xb5, 0x97, 0xe5, 0xde, - 0x8e, 0xbd, 0x78, 0xe7, 0xd4, 0x8b, 0xed, 0x97, 0x88, 0x0d, 0xb5, 0xed, 0x20, 0xf2, 0xe2, 0xe1, - 0x31, 0xed, 0x08, 0x16, 0xdb, 0xbe, 0x54, 0x32, 0x4a, 0xd0, 0x00, 0xdd, 0x38, 0x06, 0x18, 0xc5, - 0xbf, 0x64, 0xc0, 0x2f, 0xd5, 0x58, 0xf3, 0xed, 0x19, 0xe9, 0x3c, 0x23, 0x44, 0x2e, 0x61, 0xa4, - 0xd0, 0x6e, 0xcc, 0xfa, 0x7d, 0x09, 0x15, 0x52, 0x3e, 0x84, 0xa8, 0x6f, 0x17, 0xb7, 0xbe, 0x28, - 0xc1, 0xd2, 0xc7, 0x18, 0x00, 0xca, 0x53, 0x5a, 0x34, 0x3e, 0x0b, 0x3a, 0x94, 0x74, 0xa0, 0x96, - 0xed, 0xda, 0x91, 0xfc, 0xb7, 0x53, 0x4e, 0x63, 0x6f, 0xf5, 0xf5, 0xab, 0xba, 0x22, 0x3a, 0x22, - 0x1a, 0x33, 0xe4, 0x47, 0x60, 0xa6, 0xfd, 0x2c, 0x92, 0xff, 0x2f, 0x6c, 0xb2, 0xdf, 0x75, 0x1d, - 0xf1, 0x6d, 0xb0, 0x32, 0xbd, 0x22, 0x92, 0xcf, 0x79, 0xbe, 0x57, 0xb5, 0xba, 0x7e, 0x35, 0x61, - 0xba, 0x06, 0x85, 0x5a, 0xb6, 0x0d, 0x73, 0x81, 0x9e, 0x72, 0xfa, 0x3f, 0xab, 0x77, 0xa7, 0xa0, - 0x4c, 0x97, 0x39, 0x85, 0xb9, 0xb1, 0x2a, 0x91, 0xdc, 0x9d, 0xba, 0x67, 0xb1, 0xba, 0x31, 0x0d, - 0x69, 0xba, 0x52, 0x17, 0x60, 0x54, 0x74, 0x92, 0x37, 0x2e, 0x32, 0x4a, 0x4e, 0x55, 0x7a, 0xbd, - 0x85, 0xb6, 0xdf, 0xfb, 0xf4, 0x3b, 0xdd, 0x40, 0x9c, 0x0e, 0xda, 0x9b, 0x1d, 0xd6, 0xbb, 0xf7, - 0x3c, 0x08, 0xc3, 0xe0, 0xb9, 0xa0, 0x9d, 0xd3, 0x7b, 0x4a, 0xc8, 0x37, 0x15, 0xfb, 0xbd, 0x0e, - 0x8b, 0xe9, 0x3d, 0x14, 0x78, 0x4f, 0x21, 0xfd, 0x76, 0xbb, 0x8c, 0xdf, 0x6f, 0xfe, 0x27, 0x00, - 0x00, 0xff, 0xff, 0xf8, 0xe1, 0x34, 0x2c, 0x18, 0x20, 0x00, 0x00, + // 2690 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x5a, 0x4b, 0x6f, 0x24, 0x49, + 0x11, 0x76, 0xbf, 0xbb, 0xa2, 0x1f, 0x2e, 0xa7, 0xbd, 0xde, 0x1e, 0xcf, 0xce, 0x8e, 0xa7, 0xd8, + 0x87, 0xc7, 0x2b, 0x3c, 0xe0, 0x7d, 0xb0, 0x3b, 0x62, 0x5f, 0x7e, 0xcd, 0x34, 0x33, 0xeb, 0xb1, + 0xca, 0x1e, 0x6b, 0xb4, 0x3c, 0x4a, 0xd5, 0x5d, 0xe9, 0x76, 0x31, 0xd5, 0x95, 0x4d, 0x65, 0xf6, + 0xec, 0xf6, 0x48, 0x70, 0x41, 0x48, 0x1c, 0x39, 0x70, 0x42, 0xfc, 0x01, 0x2e, 0x08, 0x84, 0xb8, + 0xf0, 0x0f, 0x40, 0x5c, 0xf8, 0x15, 0x08, 0xfe, 0x00, 0x57, 0x94, 0x91, 0x59, 0xd5, 0xd5, 0xed, + 0xb2, 0xdd, 0x46, 0x2b, 0x96, 0xe5, 0x96, 0x19, 0x19, 0x11, 0x99, 0x19, 0xf1, 0x45, 0x64, 0x54, + 0x74, 0x43, 0xbd, 0xe3, 0x76, 0x9f, 0x0e, 0x07, 0x1b, 0x83, 0x88, 0x09, 0x46, 0x16, 0xfb, 0x7e, + 0xf0, 0x6c, 0xc8, 0xd5, 0x6c, 0x43, 0x2d, 0xad, 0xbc, 0xd4, 0x63, 0xac, 0x17, 0xd0, 0x3b, 0x48, + 0xec, 0x0c, 0x4f, 0xee, 0x70, 0x11, 0x0d, 0xbb, 0x42, 0x31, 0x59, 0x7f, 0xcf, 0x81, 0xd1, 0x0e, + 0x3d, 0xfa, 0x79, 0x3b, 0x3c, 0x61, 0xe4, 0x06, 0xc0, 0x89, 0x4f, 0x03, 0xcf, 0x09, 0xdd, 0x3e, + 0x6d, 0xe5, 0x56, 0x73, 0x6b, 0x86, 0x6d, 0x20, 0x65, 0xdf, 0xed, 0x53, 0xb9, 0xec, 0x4b, 0x5e, + 0xb5, 0x9c, 0x57, 0xcb, 0x48, 0x99, 0x5c, 0x16, 0xa3, 0x01, 0x6d, 0x15, 0x52, 0xcb, 0x47, 0xa3, + 0x01, 0x25, 0x5b, 0x50, 0x1e, 0xb8, 0x91, 0xdb, 0xe7, 0xad, 0xe2, 0x6a, 0x61, 0xad, 0xb6, 0xb9, + 0xbe, 0x91, 0x71, 0xdc, 0x8d, 0xe4, 0x30, 0x1b, 0x07, 0xc8, 0xbc, 0x1b, 0x8a, 0x68, 0x64, 0x6b, + 0xc9, 0x95, 0xf7, 0xa0, 0x96, 0x22, 0x13, 0x13, 0x0a, 0x4f, 0xe9, 0x48, 0x1f, 0x54, 0x0e, 0xc9, + 0x12, 0x94, 0x9e, 0xb9, 0xc1, 0x30, 0x3e, 0x9d, 0x9a, 0xdc, 0xcd, 0xbf, 0x9b, 0xb3, 0xfe, 0x56, + 0x86, 0xa5, 0x6d, 0x16, 0x04, 0xb4, 0x2b, 0x7c, 0x16, 0x6e, 0xe1, 0x6e, 0x78, 0xe9, 0x26, 0xe4, + 0x7d, 0x4f, 0xeb, 0xc8, 0xfb, 0x1e, 0xb9, 0x07, 0xc0, 0x85, 0x2b, 0xa8, 0xd3, 0x65, 0x9e, 0xd2, + 0xd3, 0xdc, 0x5c, 0xcb, 0x3c, 0xab, 0x52, 0x72, 0xe4, 0xf2, 0xa7, 0x87, 0x52, 0x60, 0x9b, 0x79, + 0xd4, 0x36, 0x78, 0x3c, 0x24, 0x16, 0xd4, 0x69, 0x14, 0xb1, 0xe8, 0x13, 0xca, 0xb9, 0xdb, 0x8b, + 0x2d, 0x32, 0x41, 0x93, 0x36, 0xe3, 0xc2, 0x8d, 0x84, 0x23, 0xfc, 0x3e, 0x6d, 0x15, 0x57, 0x73, + 0x6b, 0x05, 0x54, 0x11, 0x89, 0x23, 0xbf, 0x4f, 0xc9, 0x35, 0xa8, 0xd2, 0xd0, 0x53, 0x8b, 0x25, + 0x5c, 0xac, 0xd0, 0xd0, 0xc3, 0xa5, 0x15, 0xa8, 0x0e, 0x22, 0xd6, 0x8b, 0x28, 0xe7, 0xad, 0xf2, + 0x6a, 0x6e, 0xad, 0x64, 0x27, 0x73, 0xf2, 0x35, 0x68, 0x74, 0x93, 0xab, 0x3a, 0xbe, 0xd7, 0xaa, + 0xa0, 0x6c, 0x7d, 0x4c, 0x6c, 0x7b, 0xe4, 0x45, 0xa8, 0x78, 0x1d, 0xe5, 0xca, 0x2a, 0x9e, 0xac, + 0xec, 0x75, 0xd0, 0x8f, 0xaf, 0xc3, 0x7c, 0x4a, 0x1a, 0x19, 0x0c, 0x64, 0x68, 0x8e, 0xc9, 0xc8, + 0xf8, 0x3e, 0x94, 0x79, 0xf7, 0x94, 0xf6, 0xdd, 0x16, 0xac, 0xe6, 0xd6, 0x6a, 0x9b, 0xaf, 0x66, + 0x5a, 0x69, 0x6c, 0xf4, 0x43, 0x64, 0xb6, 0xb5, 0x10, 0xde, 0xfd, 0xd4, 0x8d, 0x3c, 0xee, 0x84, + 0xc3, 0x7e, 0xab, 0x86, 0x77, 0x30, 0x14, 0x65, 0x7f, 0xd8, 0x27, 0x36, 0x2c, 0x74, 0x59, 0xc8, + 0x7d, 0x2e, 0x68, 0xd8, 0x1d, 0x39, 0x01, 0x7d, 0x46, 0x83, 0x56, 0x1d, 0xdd, 0x71, 0xde, 0x46, + 0x09, 0xf7, 0x43, 0xc9, 0x6c, 0x9b, 0xdd, 0x29, 0x0a, 0x79, 0x0c, 0x0b, 0x03, 0x37, 0x12, 0x3e, + 0xde, 0x4c, 0x89, 0xf1, 0x56, 0x03, 0xe1, 0x98, 0xed, 0xe2, 0x83, 0x98, 0x7b, 0x0c, 0x18, 0xdb, + 0x1c, 0x4c, 0x12, 0x39, 0xb9, 0x0d, 0xa6, 0xe2, 0x47, 0x4f, 0x71, 0xe1, 0xf6, 0x07, 0xad, 0xe6, + 0x6a, 0x6e, 0xad, 0x68, 0xcf, 0x2b, 0xfa, 0x51, 0x4c, 0x26, 0x04, 0x8a, 0xdc, 0x7f, 0x4e, 0x5b, + 0xf3, 0xe8, 0x11, 0x1c, 0x93, 0xeb, 0x60, 0x9c, 0xba, 0xdc, 0xc1, 0x50, 0x69, 0x99, 0xab, 0xb9, + 0xb5, 0xaa, 0x5d, 0x3d, 0x75, 0x39, 0x86, 0x02, 
0xf9, 0x10, 0x6a, 0x2a, 0xaa, 0xfc, 0xf0, 0x84, + 0xf1, 0xd6, 0x02, 0x1e, 0xf6, 0xe5, 0x8b, 0x63, 0xc7, 0x56, 0x81, 0x28, 0x87, 0x5c, 0x9a, 0x39, + 0x60, 0xae, 0xe7, 0x20, 0x30, 0x5b, 0x44, 0x85, 0xa5, 0xa4, 0x20, 0x68, 0xc9, 0x5d, 0xb8, 0xa6, + 0xcf, 0x3e, 0x38, 0x1d, 0x71, 0xbf, 0xeb, 0x06, 0xa9, 0x4b, 0x2c, 0xe2, 0x25, 0x5e, 0x54, 0x0c, + 0x07, 0x7a, 0x3d, 0xb9, 0x8c, 0xf5, 0xf3, 0x3c, 0x2c, 0x66, 0x58, 0x88, 0xdc, 0x82, 0xfa, 0xd8, + 0xcc, 0x3a, 0xb8, 0x0a, 0x76, 0x2d, 0xa1, 0xb5, 0x3d, 0xf2, 0x2a, 0x34, 0xc7, 0x2c, 0xa9, 0x7c, + 0xd2, 0x48, 0xa8, 0x08, 0xb1, 0x33, 0x48, 0x2e, 0x64, 0x20, 0xf9, 0x11, 0xcc, 0x73, 0xda, 0xeb, + 0xd3, 0x50, 0x24, 0x3e, 0x55, 0x29, 0xe6, 0xb5, 0x4c, 0x33, 0x1d, 0x2a, 0xde, 0x94, 0x47, 0x9b, + 0x3c, 0x4d, 0xe2, 0x89, 0x93, 0x4a, 0x29, 0x27, 0x4d, 0x9a, 0xb1, 0x3c, 0x65, 0x46, 0xeb, 0x67, + 0x05, 0x58, 0x38, 0xa3, 0x18, 0x21, 0xae, 0x4f, 0x96, 0x98, 0xc1, 0xd0, 0x94, 0xb6, 0x77, 0xf6, + 0x76, 0xf9, 0x8c, 0xdb, 0x4d, 0x1b, 0xb3, 0x70, 0xd6, 0x98, 0x2f, 0x43, 0x2d, 0x1c, 0xf6, 0x1d, + 0x76, 0xe2, 0x44, 0xec, 0x33, 0x1e, 0xa7, 0x91, 0x70, 0xd8, 0x7f, 0x74, 0x62, 0xb3, 0xcf, 0x38, + 0xb9, 0x0b, 0x95, 0x8e, 0x1f, 0x06, 0xac, 0xc7, 0x5b, 0x25, 0x34, 0xcc, 0x6a, 0xa6, 0x61, 0xf6, + 0x64, 0xa6, 0xdf, 0x42, 0x46, 0x3b, 0x16, 0x20, 0x1f, 0x00, 0xa6, 0x34, 0x8e, 0xd2, 0xe5, 0x19, + 0xa5, 0xc7, 0x22, 0x52, 0xde, 0xa3, 0x81, 0x70, 0x51, 0xbe, 0x32, 0xab, 0x7c, 0x22, 0x92, 0xf8, + 0xa2, 0x9a, 0xf2, 0xc5, 0x35, 0xa8, 0xf6, 0x22, 0x36, 0x1c, 0x48, 0x73, 0x18, 0x2a, 0x2d, 0xe2, + 0xbc, 0xed, 0x59, 0x7f, 0x28, 0x00, 0xfc, 0x7f, 0x27, 0x77, 0x02, 0x45, 0x8c, 0x97, 0x0a, 0xee, + 0x88, 0xe3, 0xcc, 0x04, 0x54, 0xcd, 0x4e, 0x40, 0x4f, 0x80, 0xa4, 0x30, 0x17, 0xc7, 0x8b, 0x81, + 0x8e, 0xb9, 0x7d, 0x49, 0x02, 0x4f, 0x85, 0xcc, 0x42, 0x77, 0x8a, 0x3a, 0xf6, 0x14, 0xa4, 0x3c, + 0xf5, 0x2a, 0x34, 0x95, 0x4a, 0xe7, 0x19, 0x8d, 0xb8, 0xcf, 0x42, 0xcc, 0xf3, 0x86, 0xdd, 0x50, + 0xd4, 0x63, 0x45, 0xb4, 0xbe, 0x07, 0xd7, 0xc6, 0xbb, 0x60, 0xaa, 0x4e, 0xf9, 0xf0, 0x43, 0x28, + 0xa9, 0xdc, 0x97, 0xbb, 0xea, 0x21, 0x95, 0x9c, 0xf5, 0x29, 0xb4, 0x92, 0x2c, 0x35, 0xad, 0xfc, + 0x83, 0x49, 0xe5, 0xb3, 0xbf, 0x02, 0x5a, 0xf7, 0x31, 0x2c, 0xeb, 0xb0, 0x9f, 0xd6, 0xfc, 0xed, + 0x49, 0xcd, 0xb3, 0xe6, 0x22, 0xad, 0xf7, 0x9f, 0x39, 0x58, 0xdc, 0x8e, 0xa8, 0x2b, 0xa8, 0x5a, + 0xb3, 0xe9, 0x8f, 0x86, 0x94, 0x0b, 0xf2, 0x12, 0x18, 0x91, 0x1a, 0xb6, 0x63, 0x5c, 0x8f, 0x09, + 0xe4, 0x26, 0xd4, 0x34, 0x0e, 0x52, 0x29, 0x15, 0x14, 0x69, 0x5f, 0x03, 0x65, 0xea, 0x6d, 0xe7, + 0xad, 0xc2, 0x6a, 0x61, 0xcd, 0xb0, 0xe7, 0x27, 0x1f, 0x77, 0x2e, 0x4b, 0x29, 0x97, 0x8f, 0xc2, + 0x2e, 0x02, 0xb7, 0x6a, 0xab, 0x09, 0x79, 0x1f, 0x9a, 0x5e, 0xc7, 0x19, 0xf3, 0x72, 0x84, 0x6e, + 0x6d, 0x73, 0x79, 0x43, 0xd5, 0x99, 0x1b, 0x71, 0x9d, 0xb9, 0x71, 0x2c, 0x4b, 0x2f, 0xbb, 0xe1, + 0x75, 0xc6, 0xae, 0x41, 0xa5, 0x27, 0x2c, 0xea, 0xaa, 0x04, 0x5a, 0xb5, 0xd5, 0xc4, 0xfa, 0x5d, + 0x0e, 0x48, 0xca, 0x04, 0x94, 0x0f, 0x58, 0xc8, 0xe9, 0x25, 0x77, 0x7d, 0x1b, 0x8a, 0xa9, 0x20, + 0xbe, 0x95, 0x69, 0xde, 0x58, 0x15, 0x46, 0x2f, 0xb2, 0xcb, 0x9a, 0xb1, 0xcf, 0x7b, 0x3a, 0x5e, + 0xe5, 0x90, 0xbc, 0x09, 0x45, 0xcf, 0x15, 0x2e, 0xde, 0xb3, 0xb6, 0x79, 0xf3, 0x82, 0x6c, 0x80, + 0xa7, 0x43, 0x66, 0xeb, 0x2f, 0x39, 0x30, 0xef, 0x51, 0xf1, 0x85, 0x3a, 0xe7, 0x3a, 0x18, 0x9a, + 0x41, 0xa7, 0x79, 0xc3, 0xae, 0x2a, 0x82, 0x96, 0x1e, 0x76, 0x9f, 0x52, 0xa1, 0xa4, 0x8b, 0x5a, + 0x1a, 0x49, 0x28, 0x4d, 0xa0, 0x38, 0x70, 0xc5, 0x29, 0xfa, 0xc3, 0xb0, 0x71, 0x2c, 0xc3, 0xef, + 0x33, 0x5f, 0x9c, 0xb2, 0xa1, 0x70, 0x3c, 0x2a, 0x5c, 0x3f, 0xd0, 0x76, 
0x6f, 0x68, 0xea, 0x0e, + 0x12, 0xad, 0xef, 0x02, 0x79, 0xe8, 0xf3, 0xf8, 0xf9, 0x9b, 0xed, 0x36, 0x19, 0x55, 0x62, 0x3e, + 0xab, 0x4a, 0xb4, 0x7e, 0x9f, 0x83, 0xc5, 0x09, 0xed, 0x5f, 0x96, 0x77, 0x0b, 0xb3, 0x7b, 0xf7, + 0x08, 0x16, 0x77, 0x68, 0x40, 0xbf, 0xd8, 0xe0, 0xb3, 0x7e, 0x0c, 0x4b, 0x93, 0x5a, 0xff, 0xab, + 0x96, 0xb0, 0x7e, 0x5b, 0x84, 0x25, 0x9b, 0x72, 0xc1, 0xa2, 0x2f, 0x2d, 0xa7, 0xbc, 0x01, 0xa9, + 0x77, 0xc3, 0xe1, 0xc3, 0x93, 0x13, 0xff, 0x73, 0x0d, 0xe5, 0x94, 0x8e, 0x43, 0xa4, 0x13, 0x36, + 0xf1, 0x52, 0x45, 0x54, 0x69, 0x56, 0x05, 0xcc, 0x47, 0xe7, 0x99, 0xe1, 0xcc, 0xed, 0x52, 0x2f, + 0x83, 0xad, 0x54, 0xa8, 0x4f, 0xca, 0xd4, 0x41, 0x34, 0x7d, 0x9c, 0xf1, 0xca, 0xe9, 0x8c, 0x37, + 0x15, 0x78, 0x95, 0x73, 0x03, 0xaf, 0x9a, 0x0a, 0xbc, 0xb3, 0x69, 0xd2, 0xb8, 0x4a, 0x9a, 0x5c, + 0x81, 0x6a, 0x9f, 0x0a, 0xf7, 0x51, 0x18, 0x8c, 0xf0, 0x39, 0xad, 0xda, 0xc9, 0x5c, 0x56, 0x1e, + 0x91, 0xba, 0x27, 0xd6, 0xfb, 0xf8, 0xa0, 0x56, 0xed, 0x09, 0xda, 0xca, 0x0e, 0x2c, 0x67, 0x5f, + 0xfb, 0x4a, 0x9f, 0xcc, 0x7f, 0xcc, 0x27, 0x80, 0x49, 0x5e, 0x40, 0x59, 0x10, 0x9d, 0xa9, 0xaa, + 0xee, 0x67, 0x54, 0x55, 0xb7, 0x2f, 0xf2, 0xd0, 0xff, 0x60, 0x59, 0xd5, 0x06, 0x2c, 0xa9, 0x75, + 0x45, 0x84, 0x6e, 0xbe, 0x4a, 0x39, 0x00, 0x52, 0x58, 0xcd, 0xad, 0x9f, 0x96, 0xe0, 0x05, 0x7d, + 0xd1, 0xb1, 0x17, 0xbe, 0xd2, 0x86, 0xfb, 0x0e, 0xd4, 0x24, 0x96, 0x63, 0xe3, 0x94, 0xd1, 0x38, + 0x57, 0x28, 0xc4, 0x40, 0x4a, 0xab, 0x39, 0x79, 0x0b, 0x96, 0x85, 0x1b, 0xf5, 0xa8, 0x70, 0xa6, + 0xdf, 0x0f, 0x15, 0x5a, 0x4b, 0x6a, 0x75, 0x7b, 0xb2, 0xd7, 0xe0, 0xc2, 0x8b, 0xe3, 0xaf, 0x20, + 0x8d, 0x75, 0x47, 0xb8, 0xfc, 0x29, 0x6f, 0x55, 0x2f, 0x28, 0x0b, 0xb3, 0xe0, 0x6b, 0xbf, 0x90, + 0x68, 0x4a, 0x59, 0x15, 0xbb, 0x26, 0x5a, 0xb1, 0xe7, 0x60, 0x21, 0xab, 0x3e, 0x2d, 0xe2, 0xc8, + 0xf2, 0x0e, 0x65, 0x41, 0xfb, 0x1a, 0xcc, 0x0b, 0x96, 0x1c, 0x20, 0x55, 0xef, 0x36, 0x04, 0xd3, + 0xda, 0x90, 0x2f, 0x0d, 0xb5, 0xda, 0x14, 0xd4, 0x5e, 0x81, 0xa6, 0xb6, 0x40, 0xdc, 0x80, 0xa9, + 0x2b, 0x6f, 0x29, 0xea, 0x8e, 0x6a, 0xc3, 0xa4, 0x73, 0x40, 0xe3, 0x92, 0x1c, 0xd0, 0x3c, 0x9b, + 0x03, 0xac, 0x5f, 0x15, 0x60, 0x61, 0x22, 0x21, 0x7e, 0xa5, 0x11, 0xe8, 0x41, 0x6b, 0xe2, 0x31, + 0x48, 0x03, 0xa0, 0x7c, 0x41, 0x3f, 0x31, 0x33, 0x0e, 0xed, 0xe5, 0x74, 0xf2, 0xbf, 0x08, 0x02, + 0x95, 0xd9, 0x20, 0x50, 0xbd, 0x0c, 0x02, 0xc6, 0x24, 0x04, 0xac, 0x3f, 0xe5, 0x92, 0x14, 0xf1, + 0xa5, 0x14, 0x03, 0xe4, 0xee, 0x44, 0xd1, 0xfb, 0xda, 0xe5, 0xcf, 0x29, 0xda, 0x4d, 0x55, 0x47, + 0x7b, 0xb0, 0x7c, 0x8f, 0x8a, 0xf8, 0xaa, 0x12, 0x00, 0xb3, 0x55, 0x12, 0x0a, 0x7b, 0xf9, 0x18, + 0x7b, 0xd6, 0x0f, 0xa0, 0x96, 0xfa, 0xe8, 0x27, 0x2d, 0xa8, 0x60, 0xaf, 0xb9, 0xbd, 0xa3, 0x3b, + 0x25, 0xf1, 0x94, 0xbc, 0x3d, 0xee, 0x5f, 0xe4, 0xd1, 0xd7, 0xd7, 0xb3, 0xcb, 0xb8, 0xc9, 0xd6, + 0x85, 0xf5, 0x9b, 0x1c, 0x94, 0xb5, 0xee, 0x9b, 0x50, 0xa3, 0xa1, 0x88, 0x7c, 0xaa, 0x9a, 0x8d, + 0x4a, 0x3f, 0x68, 0xd2, 0xfe, 0xb0, 0x2f, 0x2b, 0xe5, 0xe4, 0xd3, 0xd9, 0x39, 0x89, 0x58, 0x1f, + 0xcf, 0x59, 0xb4, 0x1b, 0x09, 0x75, 0x2f, 0x62, 0x7d, 0x72, 0x0b, 0xea, 0x63, 0x36, 0xc1, 0xd0, + 0xa2, 0x45, 0xbb, 0x96, 0xd0, 0x8e, 0x98, 0x04, 0x71, 0xc0, 0x7a, 0x0e, 0x96, 0x04, 0xaa, 0xb4, + 0xa9, 0x04, 0xac, 0x77, 0x20, 0xab, 0x02, 0xbd, 0x94, 0xea, 0x2d, 0xc9, 0x25, 0x09, 0x16, 0xeb, + 0x1d, 0xa8, 0x3f, 0xa0, 0x23, 0x2c, 0x06, 0x0e, 0x5c, 0x3f, 0x9a, 0xf5, 0x9d, 0xb6, 0xfe, 0x95, + 0x03, 0x40, 0x29, 0xb4, 0x24, 0xb9, 0x01, 0x46, 0x87, 0xb1, 0xc0, 0x41, 0xdf, 0x4a, 0xe1, 0xea, + 0xfd, 0x39, 0xbb, 0x2a, 0x49, 0x3b, 0xae, 0x70, 0xc9, 0x75, 0xa8, 0xfa, 0xa1, 0x50, 0xab, 0x52, + 
0x4d, 0xe9, 0xfe, 0x9c, 0x5d, 0xf1, 0x43, 0x81, 0x8b, 0x37, 0xc0, 0x08, 0x58, 0xd8, 0x53, 0xab, + 0xd8, 0x65, 0x92, 0xb2, 0x92, 0x84, 0xcb, 0x37, 0x01, 0x4e, 0x02, 0xe6, 0x6a, 0x69, 0x79, 0xb3, + 0xfc, 0xfd, 0x39, 0xdb, 0x40, 0x1a, 0x32, 0xdc, 0x82, 0x9a, 0xc7, 0x86, 0x9d, 0x80, 0x2a, 0x0e, + 0x79, 0xc1, 0xdc, 0xfd, 0x39, 0x1b, 0x14, 0x31, 0x66, 0xe1, 0x22, 0xf2, 0xe3, 0x4d, 0xb0, 0x8b, + 0x26, 0x59, 0x14, 0x31, 0xde, 0xa6, 0x33, 0x12, 0x94, 0x2b, 0x0e, 0x19, 0x7f, 0x75, 0xb9, 0x0d, + 0xd2, 0x24, 0xc3, 0x56, 0x59, 0x21, 0xd7, 0xfa, 0x47, 0x51, 0xc3, 0x47, 0xb5, 0x95, 0x2f, 0x80, + 0x4f, 0xdc, 0x31, 0xc9, 0xa7, 0x3a, 0x26, 0xaf, 0x40, 0xd3, 0xe7, 0xce, 0x20, 0xf2, 0xfb, 0x6e, + 0x34, 0x72, 0xa4, 0xa9, 0x0b, 0x2a, 0x87, 0xfa, 0xfc, 0x40, 0x11, 0x1f, 0xd0, 0x11, 0x59, 0x85, + 0x9a, 0x47, 0x79, 0x37, 0xf2, 0x07, 0x32, 0x55, 0x68, 0x77, 0xa6, 0x49, 0xe4, 0x2e, 0x18, 0xf2, + 0x34, 0xea, 0x37, 0x8f, 0x12, 0x46, 0xe5, 0x8d, 0x4c, 0x70, 0xca, 0xb3, 0x1f, 0x8d, 0x06, 0xd4, + 0xae, 0x7a, 0x7a, 0x44, 0xb6, 0xa0, 0x26, 0xc5, 0x1c, 0xfd, 0xb3, 0x88, 0x4a, 0x63, 0xd9, 0x31, + 0x9d, 0xc6, 0x86, 0x0d, 0x52, 0x4a, 0xfd, 0x0e, 0x42, 0x76, 0xa0, 0xae, 0xda, 0xc3, 0x5a, 0x49, + 0x65, 0x56, 0x25, 0xaa, 0xab, 0xac, 0xb5, 0x2c, 0x43, 0xd9, 0x1d, 0x0a, 0xd6, 0xde, 0xc1, 0x4c, + 0x56, 0xb5, 0xf5, 0x8c, 0xbc, 0x0d, 0x25, 0xd5, 0xef, 0x34, 0xf0, 0x66, 0x37, 0xcf, 0x6f, 0xdc, + 0xa9, 0x34, 0xa0, 0xb8, 0xc9, 0x47, 0x50, 0xa7, 0x01, 0xc5, 0xb6, 0x27, 0xda, 0x05, 0x66, 0xb1, + 0x4b, 0x4d, 0x8b, 0xa0, 0x69, 0x76, 0xa0, 0xe1, 0xd1, 0x13, 0x77, 0x18, 0x08, 0x47, 0x81, 0xbe, + 0x76, 0xc1, 0xc7, 0xf9, 0x18, 0xff, 0x76, 0x5d, 0x4b, 0x21, 0x09, 0x7f, 0x91, 0xe2, 0x8e, 0x37, + 0x0a, 0xdd, 0xbe, 0xdf, 0xc5, 0x47, 0xb6, 0x6a, 0x1b, 0x3e, 0xdf, 0x51, 0x04, 0xb2, 0x06, 0xa6, + 0xc4, 0x40, 0x52, 0x56, 0x48, 0x14, 0xa8, 0x97, 0xb6, 0xe9, 0xf3, 0xa4, 0x64, 0x78, 0x40, 0x47, + 0xd6, 0x5f, 0x73, 0x60, 0x4e, 0xff, 0x8e, 0x91, 0xc0, 0x2a, 0x97, 0x82, 0xd5, 0x14, 0x60, 0xf2, + 0x67, 0x01, 0x33, 0x36, 0x75, 0x61, 0xc2, 0xd4, 0xef, 0x42, 0x19, 0xf1, 0x1a, 0xf7, 0xae, 0x2f, + 0x68, 0x92, 0xc6, 0xbf, 0xa3, 0x28, 0x7e, 0xf2, 0x0d, 0x58, 0xa2, 0xa1, 0x8b, 0x71, 0xa7, 0x2e, + 0xe6, 0xe0, 0x02, 0xa2, 0xb1, 0x6a, 0x13, 0xb5, 0xa6, 0xef, 0x8c, 0xf2, 0x56, 0x13, 0xea, 0xdb, + 0xa7, 0xb4, 0xfb, 0x54, 0xa7, 0x6d, 0xeb, 0x09, 0x34, 0xf4, 0x5c, 0x3f, 0x42, 0xf1, 0x33, 0x93, + 0xfb, 0x8f, 0x9e, 0x99, 0x7c, 0xf2, 0xcc, 0xac, 0xff, 0x04, 0xea, 0x69, 0x3e, 0x52, 0x83, 0xca, + 0xe1, 0xb0, 0xdb, 0xa5, 0x9c, 0x9b, 0x73, 0x64, 0x1e, 0x6a, 0xfb, 0x4c, 0x38, 0x87, 0xc3, 0xc1, + 0x80, 0x45, 0xc2, 0xcc, 0x91, 0x05, 0x68, 0xec, 0x33, 0xe7, 0x80, 0x46, 0x7d, 0x9f, 0x73, 0x9f, + 0x85, 0x66, 0x9e, 0x54, 0xa1, 0xb8, 0xe7, 0xfa, 0x81, 0x59, 0x20, 0x4b, 0x30, 0x8f, 0x68, 0xa5, + 0x82, 0x46, 0xce, 0xae, 0xac, 0x2a, 0xcc, 0x5f, 0x14, 0xc8, 0x0d, 0x68, 0xe9, 0x5b, 0x38, 0x8f, + 0x3a, 0x3f, 0xa4, 0x5d, 0xe1, 0x48, 0x95, 0x7b, 0x6c, 0x18, 0x7a, 0xe6, 0x2f, 0x0b, 0xeb, 0x9f, + 0xc3, 0x62, 0x46, 0x23, 0x97, 0x10, 0x68, 0x6e, 0x7d, 0xbc, 0xfd, 0xe0, 0xf1, 0x81, 0xd3, 0xde, + 0x6f, 0x1f, 0xb5, 0x3f, 0x7e, 0x68, 0xce, 0x91, 0x25, 0x30, 0x35, 0x6d, 0xf7, 0xc9, 0xee, 0xf6, + 0xe3, 0xa3, 0xf6, 0xfe, 0x3d, 0x33, 0x97, 0xe2, 0x3c, 0x7c, 0xbc, 0xbd, 0xbd, 0x7b, 0x78, 0x68, + 0xe6, 0xe5, 0xb9, 0x35, 0x6d, 0xef, 0xe3, 0xf6, 0x43, 0xb3, 0x90, 0x62, 0x3a, 0x6a, 0x7f, 0xb2, + 0xfb, 0xe8, 0xf1, 0x91, 0x59, 0x5c, 0x3f, 0x4e, 0xbe, 0x9d, 0x26, 0xb7, 0xae, 0x41, 0x65, 0xbc, + 0x67, 0x03, 0x8c, 0xf4, 0x66, 0xd2, 0x3a, 0xc9, 0x2e, 0xf2, 0xe6, 0x4a, 0x7d, 0x0d, 0x2a, 0x63, + 0xbd, 0x4f, 0x24, 0x12, 
0xa7, 0x7e, 0xd6, 0x02, 0x28, 0x1f, 0x8a, 0x88, 0x85, 0x3d, 0x73, 0x0e, + 0x75, 0x50, 0x65, 0x3d, 0x54, 0xb8, 0x25, 0x4d, 0x41, 0x3d, 0x33, 0x4f, 0x9a, 0x00, 0xbb, 0xcf, + 0x68, 0x28, 0x86, 0x6e, 0x10, 0x8c, 0xcc, 0x82, 0x9c, 0x6f, 0x0f, 0xb9, 0x60, 0x7d, 0xff, 0x39, + 0xf5, 0xcc, 0xe2, 0xfa, 0xaf, 0x73, 0x50, 0x8d, 0xa3, 0x51, 0xee, 0xbe, 0xcf, 0x42, 0x6a, 0xce, + 0xc9, 0xd1, 0x16, 0x63, 0x81, 0x99, 0x93, 0xa3, 0x76, 0x28, 0xde, 0x35, 0xf3, 0xc4, 0x80, 0x52, + 0x3b, 0x14, 0xdf, 0x7c, 0xc7, 0x2c, 0xe8, 0xe1, 0x9b, 0x9b, 0x66, 0x51, 0x0f, 0xdf, 0x79, 0xcb, + 0x2c, 0xc9, 0xe1, 0x9e, 0x7c, 0x18, 0x4c, 0x90, 0x87, 0xdb, 0xc1, 0x17, 0xc0, 0xac, 0xe9, 0x83, + 0xfa, 0x61, 0xcf, 0x5c, 0x92, 0x67, 0x3b, 0x76, 0xa3, 0xed, 0x53, 0x37, 0x32, 0x5f, 0x20, 0x26, + 0xd4, 0xb7, 0xfc, 0xd0, 0x8d, 0x46, 0xc7, 0xb4, 0x2b, 0x58, 0x64, 0x7a, 0xd2, 0xc8, 0xa8, 0x41, + 0x13, 0xe8, 0xfa, 0x31, 0xc0, 0x38, 0xd3, 0x48, 0x01, 0x9c, 0xa9, 0x1e, 0xa9, 0x67, 0xce, 0x49, + 0xf0, 0x8c, 0x29, 0x72, 0x8b, 0x5c, 0x42, 0xda, 0x89, 0xd8, 0x60, 0x20, 0x49, 0xf9, 0x44, 0x0e, + 0x49, 0xd4, 0x33, 0x0b, 0x9b, 0x7f, 0x2e, 0xc1, 0xe2, 0x27, 0x88, 0x6f, 0x85, 0x94, 0x43, 0x1a, + 0x3d, 0xf3, 0xbb, 0x94, 0x74, 0xa1, 0x9e, 0x6e, 0xc0, 0x92, 0xec, 0x6f, 0xc1, 0x8c, 0x1e, 0xed, + 0xca, 0xeb, 0x97, 0x35, 0x99, 0x74, 0x44, 0x58, 0x73, 0xe4, 0xfb, 0x60, 0x24, 0x5d, 0x44, 0x92, + 0xfd, 0xb3, 0xe6, 0x74, 0x97, 0xf1, 0x2a, 0xea, 0x3b, 0x50, 0x4b, 0xb5, 0xde, 0x48, 0xb6, 0xe4, + 0xd9, 0xd6, 0xdf, 0xca, 0xda, 0xe5, 0x8c, 0xc9, 0x1e, 0x14, 0xea, 0xe9, 0xae, 0xd6, 0x39, 0x76, + 0xca, 0x68, 0xa7, 0xad, 0xdc, 0x9e, 0x81, 0x33, 0xd9, 0xe6, 0x14, 0x1a, 0x13, 0xf5, 0x28, 0xb9, + 0x3d, 0x73, 0x0b, 0x68, 0x65, 0x7d, 0x16, 0xd6, 0x64, 0xa7, 0x1e, 0xc0, 0xb8, 0xbc, 0x25, 0x6f, + 0x9c, 0xe7, 0x94, 0x8c, 0xfa, 0xf7, 0x8a, 0x1b, 0x1d, 0x40, 0x09, 0xd3, 0x2e, 0xc9, 0x4e, 0xb0, + 0xe9, 0x14, 0xbd, 0x62, 0x5d, 0xc4, 0x12, 0x6b, 0xdc, 0x7a, 0xef, 0xd3, 0x6f, 0xf5, 0x7c, 0x71, + 0x3a, 0xec, 0x6c, 0x74, 0x59, 0xff, 0xce, 0x73, 0x3f, 0x08, 0xfc, 0xe7, 0x82, 0x76, 0x4f, 0xef, + 0x28, 0xe1, 0xaf, 0x2b, 0xb1, 0x3b, 0x5d, 0x16, 0xe9, 0x3f, 0x84, 0xdc, 0x51, 0x94, 0x41, 0xa7, + 0x53, 0xc6, 0xf9, 0x9b, 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x98, 0xcb, 0xc0, 0x53, 0x22, + 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -2659,6 +2834,8 @@ type MilvusBackupServiceClient interface { RestoreBackup(ctx context.Context, in *RestoreBackupRequest, opts ...grpc.CallOption) (*RestoreBackupResponse, error) // Get restore state by given id GetRestore(ctx context.Context, in *GetRestoreStateRequest, opts ...grpc.CallOption) (*RestoreBackupResponse, error) + // Check connections + Check(ctx context.Context, in *CheckRequest, opts ...grpc.CallOption) (*CheckResponse, error) } type milvusBackupServiceClient struct { @@ -2723,6 +2900,15 @@ func (c *milvusBackupServiceClient) GetRestore(ctx context.Context, in *GetResto return out, nil } +func (c *milvusBackupServiceClient) Check(ctx context.Context, in *CheckRequest, opts ...grpc.CallOption) (*CheckResponse, error) { + out := new(CheckResponse) + err := c.cc.Invoke(ctx, "/milvus.proto.backup.MilvusBackupService/Check", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // MilvusBackupServiceServer is the server API for MilvusBackupService service. 
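// In addition to the existing backup and restore RPCs, this interface now includes
// Check. Servers that embed UnimplementedMilvusBackupServiceServer keep compiling and
// return codes.Unimplemented for Check until they override it. For reference, a
// hypothetical client-side call (not part of the generated code; it assumes an existing
// *grpc.ClientConn named conn and a context ctx) looks like:
//
//	client := backuppb.NewMilvusBackupServiceClient(conn)
//	resp, err := client.Check(ctx, &backuppb.CheckRequest{})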
type MilvusBackupServiceServer interface { // Create backup @@ -2737,6 +2923,8 @@ type MilvusBackupServiceServer interface { RestoreBackup(context.Context, *RestoreBackupRequest) (*RestoreBackupResponse, error) // Get restore state by given id GetRestore(context.Context, *GetRestoreStateRequest) (*RestoreBackupResponse, error) + // Check connections + Check(context.Context, *CheckRequest) (*CheckResponse, error) } // UnimplementedMilvusBackupServiceServer can be embedded to have forward compatible implementations. @@ -2761,6 +2949,9 @@ func (*UnimplementedMilvusBackupServiceServer) RestoreBackup(ctx context.Context func (*UnimplementedMilvusBackupServiceServer) GetRestore(ctx context.Context, req *GetRestoreStateRequest) (*RestoreBackupResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetRestore not implemented") } +func (*UnimplementedMilvusBackupServiceServer) Check(ctx context.Context, req *CheckRequest) (*CheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") +} func RegisterMilvusBackupServiceServer(s *grpc.Server, srv MilvusBackupServiceServer) { s.RegisterService(&_MilvusBackupService_serviceDesc, srv) @@ -2874,6 +3065,24 @@ func _MilvusBackupService_GetRestore_Handler(srv interface{}, ctx context.Contex return interceptor(ctx, in, info, handler) } +func _MilvusBackupService_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MilvusBackupServiceServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/milvus.proto.backup.MilvusBackupService/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MilvusBackupServiceServer).Check(ctx, req.(*CheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _MilvusBackupService_serviceDesc = grpc.ServiceDesc{ ServiceName: "milvus.proto.backup.MilvusBackupService", HandlerType: (*MilvusBackupServiceServer)(nil), @@ -2902,6 +3111,10 @@ var _MilvusBackupService_serviceDesc = grpc.ServiceDesc{ MethodName: "GetRestore", Handler: _MilvusBackupService_GetRestore_Handler, }, + { + MethodName: "Check", + Handler: _MilvusBackupService_Check_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "backup.proto", diff --git a/core/storage/aliyun/aliyun.go b/core/storage/aliyun/aliyun.go new file mode 100644 index 00000000..b379805e --- /dev/null +++ b/core/storage/aliyun/aliyun.go @@ -0,0 +1,97 @@ +package aliyun + +import ( + "github.com/aliyun/credentials-go/credentials" // >= v1.2.6 + "github.com/cockroachdb/errors" + "github.com/minio/minio-go/v7" + minioCred "github.com/minio/minio-go/v7/pkg/credentials" + + "github.com/zilliztech/milvus-backup/internal/log" +) + +const OSSDefaultAddress = "oss.aliyuncs.com" + +// NewMinioClient returns a minio.Client which is compatible for aliyun OSS +func NewMinioClient(address string, opts *minio.Options) (*minio.Client, error) { + if opts == nil { + opts = &minio.Options{} + } + if opts.Creds == nil { + credProvider, err := NewCredentialProvider() + if err != nil { + return nil, errors.Wrap(err, "failed to create credential provider") + } + opts.Creds = minioCred.New(credProvider) + } + if address == "" { + address = OSSDefaultAddress + opts.Secure = true + } + return minio.New(address, opts) +} + +// Credential is defined to mock 
aliyun credential.Credentials +// +//go:generate mockery --name=Credential --with-expecter +type Credential interface { + credentials.Credential +} + +// CredentialProvider implements "github.com/minio/minio-go/v7/pkg/credentials".Provider +// also implements transport +type CredentialProvider struct { + // aliyunCreds doesn't provide a way to get the expire time, so we use the cache to check if it's expired + // when aliyunCreds.GetAccessKeyId is different from the cache, we know it's expired + akCache string + aliyunCreds Credential +} + +func NewCredentialProvider() (minioCred.Provider, error) { + aliyunCreds, err := credentials.NewCredential(nil) + if err != nil { + return nil, errors.Wrap(err, "failed to create aliyun credential") + } + // backend, err := minio.DefaultTransport(true) + // if err != nil { + // return nil, errors.Wrap(err, "failed to create default transport") + // } + return &CredentialProvider{aliyunCreds: aliyunCreds}, nil +} + +// Retrieve returns nil if it successfully retrieved the value. +// Error is returned if the value were not obtainable, or empty. +// according to the caller minioCred.Credentials.Get(), +// it already has a lock, so we don't need to worry about concurrency +func (c *CredentialProvider) Retrieve() (minioCred.Value, error) { + ret := minioCred.Value{} + ak, err := c.aliyunCreds.GetAccessKeyId() + if err != nil { + return ret, errors.Wrap(err, "failed to get access key id from aliyun credential") + } + ret.AccessKeyID = *ak + sk, err := c.aliyunCreds.GetAccessKeySecret() + if err != nil { + return minioCred.Value{}, errors.Wrap(err, "failed to get access key secret from aliyun credential") + } + securityToken, err := c.aliyunCreds.GetSecurityToken() + if err != nil { + return minioCred.Value{}, errors.Wrap(err, "failed to get security token from aliyun credential") + } + ret.SecretAccessKey = *sk + c.akCache = *ak + ret.SessionToken = *securityToken + return ret, nil +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be retrieved. +// according to the caller minioCred.Credentials.IsExpired(), +// it already has a lock, so we don't need to worry about concurrency +func (c CredentialProvider) IsExpired() bool { + ak, err := c.aliyunCreds.GetAccessKeyId() + if err != nil { + log.Warn("failed to get access key id from aliyun credential, assume it's expired") + return true + } + return *ak != c.akCache +} diff --git a/core/storage/azure_chunk_manager.go b/core/storage/azure_chunk_manager.go new file mode 100644 index 00000000..9d774cf9 --- /dev/null +++ b/core/storage/azure_chunk_manager.go @@ -0,0 +1,442 @@ +// Licensed to the LF AI & Data foundation under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
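+// azure_chunk_manager.go introduces an Azure Blob Storage backed implementation of the
+// ChunkManager interface. It mirrors the existing minio-based implementation and is
+// selected by NewChunkManager (core/storage/chunk_manager.go) when the configured cloud
+// provider is "azure".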
+ +package storage + +import ( + "bytes" + "context" + "fmt" + "io" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/cockroachdb/errors" + "github.com/minio/minio-go/v7" + "go.uber.org/zap" + "golang.org/x/exp/mmap" + "golang.org/x/sync/errgroup" + + "github.com/zilliztech/milvus-backup/internal/log" + "github.com/zilliztech/milvus-backup/internal/util/errorutil" +) + +// AzureChunkManager is responsible for read and write data stored in minio. +type AzureChunkManager struct { + client *AzureObjectStorage + + //cli *azblob.Client + // ctx context.Context + bucketName string + rootPath string +} + +var _ ChunkManager = (*AzureChunkManager)(nil) + +func NewAzureChunkManager(ctx context.Context, c *config) (*AzureChunkManager, error) { + client, err := newAzureObjectStorageWithConfig(ctx, c) + if err != nil { + return nil, err + } + + //cli, err := NewAzureClient(ctx, c) + //if err != nil { + // return nil, err + //} + mcm := &AzureChunkManager{ + client: client, + //cli: cli, + bucketName: c.bucketName, + rootPath: strings.TrimLeft(c.rootPath, "/"), + } + log.Info("Azure chunk manager init success.", zap.String("bucketname", c.bucketName), zap.String("root", mcm.RootPath())) + return mcm, nil +} + +// RootPath returns minio root path. +func (mcm *AzureChunkManager) RootPath() string { + return mcm.rootPath +} + +func (mcm *AzureChunkManager) Copy(ctx context.Context, fromBucketName string, toBucketName string, fromPath string, toPath string) error { + objectkeys, _, err := mcm.ListWithPrefix(ctx, fromBucketName, fromPath, true) + if err != nil { + log.Warn("listWithPrefix error", zap.String("prefix", fromPath), zap.Error(err)) + return err + } + for _, objectkey := range objectkeys { + dstObjectKey := strings.Replace(objectkey, fromPath, toPath, 1) + err := mcm.client.CopyObject(ctx, fromBucketName, toBucketName, objectkey, dstObjectKey) + if err != nil { + log.Error("copyObject error", zap.String("srcObjectKey", objectkey), zap.String("dstObjectKey", dstObjectKey), zap.Error(err)) + return err + } + } + return nil +} + +// Path returns the path of minio data if exists. +func (mcm *AzureChunkManager) Path(ctx context.Context, bucketName string, filePath string) (string, error) { + exist, err := mcm.Exist(ctx, bucketName, filePath) + if err != nil { + return "", err + } + if !exist { + return "", errors.New("minio file manage cannot be found with filePath:" + filePath) + } + return filePath, nil +} + +// Reader returns the path of minio data if exists. +func (mcm *AzureChunkManager) Reader(ctx context.Context, bucketName string, filePath string) (FileReader, error) { + reader, err := mcm.getObject(ctx, bucketName, filePath, int64(0), int64(0)) + if err != nil { + log.Warn("failed to get object", zap.String("bucket", bucketName), zap.String("path", filePath), zap.Error(err)) + return nil, err + } + return reader, nil +} + +func (mcm *AzureChunkManager) Size(ctx context.Context, bucketName string, filePath string) (int64, error) { + objectInfo, err := mcm.getObjectSize(ctx, bucketName, filePath) + if err != nil { + log.Warn("failed to stat object", zap.String("bucket", bucketName), zap.String("path", filePath), zap.Error(err)) + return 0, err + } + + return objectInfo, nil +} + +// +// Write writes the data to minio storage. 
+func (mcm *AzureChunkManager) Write(ctx context.Context, bucketName string, filePath string, content []byte) error { + err := mcm.putObject(ctx, bucketName, filePath, bytes.NewReader(content), int64(len(content))) + if err != nil { + log.Warn("failed to put object", zap.String("bucket", bucketName), zap.String("path", filePath), zap.Error(err)) + return err + } + + return nil +} + +// MultiWrite saves multiple objects, the path is the key of @kvs. +// The object value is the value of @kvs. +func (mcm *AzureChunkManager) MultiWrite(ctx context.Context, bucketName string, kvs map[string][]byte) error { + var el error + for key, value := range kvs { + err := mcm.Write(ctx, bucketName, key, value) + if err != nil { + el = errors.New(fmt.Sprintf("failed to write %s", key)) + } + } + return el +} + +// Exist checks whether chunk is saved to minio storage. +func (mcm *AzureChunkManager) Exist(ctx context.Context, bucketName string, filePath string) (bool, error) { + _, err := mcm.getObjectSize(ctx, mcm.bucketName, filePath) + if err != nil { + if IsErrNoSuchKey(err) { + return false, nil + } + log.Warn("failed to stat object", zap.String("bucket", mcm.bucketName), zap.String("path", filePath), zap.Error(err)) + return false, err + } + return true, nil +} + +// Read reads the minio storage data if exists. +func (mcm *AzureChunkManager) Read(ctx context.Context, bucketName string, filePath string) ([]byte, error) { + object, err := mcm.getObject(ctx, bucketName, filePath, int64(0), int64(0)) + if err != nil { + log.Warn("failed to get object", zap.String("bucket", mcm.bucketName), zap.String("path", filePath), zap.Error(err)) + return nil, err + } + defer object.Close() + + // Prefetch object data + var empty []byte + _, err = object.Read(empty) + if err != nil { + errResponse := minio.ToErrorResponse(err) + if errResponse.Code == "NoSuchKey" { + return nil, WrapErrNoSuchKey(filePath) + } + log.Warn("failed to read object", zap.String("path", filePath), zap.Error(err)) + return nil, err + } + size, err := mcm.getObjectSize(ctx, mcm.bucketName, filePath) + if err != nil { + log.Warn("failed to stat object", zap.String("bucket", bucketName), zap.String("path", filePath), zap.Error(err)) + return nil, err + } + data, err := Read(object, size) + if err != nil { + errResponse := minio.ToErrorResponse(err) + if errResponse.Code == "NoSuchKey" { + return nil, WrapErrNoSuchKey(filePath) + } + log.Warn("failed to read object", zap.String("bucket", bucketName), zap.String("path", filePath), zap.Error(err)) + return nil, err + } + return data, nil +} + +func (mcm *AzureChunkManager) MultiRead(ctx context.Context, bucketName string, keys []string) ([][]byte, error) { + var el error + var objectsValues [][]byte + for _, key := range keys { + objectValue, err := mcm.Read(ctx, bucketName, key) + if err != nil { + el = errors.New(fmt.Sprintf("failed to read %s %s", bucketName, key)) + } + objectsValues = append(objectsValues, objectValue) + } + + return objectsValues, el +} + +func (mcm *AzureChunkManager) ReadWithPrefix(ctx context.Context, bucketName string, prefix string) ([]string, [][]byte, error) { + objectsKeys, _, err := mcm.ListWithPrefix(ctx, bucketName, prefix, true) + if err != nil { + return nil, nil, err + } + objectsValues, err := mcm.MultiRead(ctx, bucketName, objectsKeys) + if err != nil { + return nil, nil, err + } + + return objectsKeys, objectsValues, nil +} + +func (mcm *AzureChunkManager) Mmap(ctx context.Context, bucketName string, filePath string) (*mmap.ReaderAt, error) { + return nil, 
errors.New("this method has not been implemented") +} + +// ReadAt reads specific position data of minio storage if exists. +func (mcm *AzureChunkManager) ReadAt(ctx context.Context, bucketName string, filePath string, off int64, length int64) ([]byte, error) { + return nil, errors.New("this method has not been implemented") + //if off < 0 || length < 0 { + // return nil, io.EOF + //} + // + //object, err := mcm.getObject(ctx, bucketName, filePath, off, length) + //if err != nil { + // log.Warn("failed to get object", zap.String("bucket", bucketName), zap.String("path", filePath), zap.Error(err)) + // return nil, err + //} + //defer object.Close() + // + //data, err := Read(object, length) + //if err != nil { + // errResponse := minio.ToErrorResponse(err) + // if errResponse.Code == "NoSuchKey" { + // return nil, WrapErrNoSuchKey(filePath) + // } + // log.Warn("failed to read object", zap.String("bucket", bucketName), zap.String("path", filePath), zap.Error(err)) + // return nil, err + //} + //return data, nil +} + +// Remove deletes an object with @key. +func (mcm *AzureChunkManager) Remove(ctx context.Context, bucketName string, filePath string) error { + err := mcm.removeObject(ctx, bucketName, filePath) + if err != nil { + log.Warn("failed to remove object", zap.String("bucket", bucketName), zap.String("path", filePath), zap.Error(err)) + return err + } + return nil +} + +// MultiRemove deletes a objects with @keys. +func (mcm *AzureChunkManager) MultiRemove(ctx context.Context, bucketName string, keys []string) error { + var el errorutil.ErrorList + for _, key := range keys { + err := mcm.Remove(ctx, bucketName, key) + if err != nil { + el = append(el, err) + } + } + if len(el) == 0 { + return nil + } + return el +} + +// RemoveWithPrefix removes all objects with the same prefix @prefix from minio. +func (mcm *AzureChunkManager) RemoveWithPrefix(ctx context.Context, bucketName string, prefix string) error { + objects, err := mcm.listObjects(ctx, bucketName, prefix, true) + if err != nil { + return err + } + removeKeys := make([]string, 0) + for key := range objects { + removeKeys = append(removeKeys, key) + } + i := 0 + maxGoroutine := 10 + for i < len(removeKeys) { + runningGroup, groupCtx := errgroup.WithContext(ctx) + for j := 0; j < maxGoroutine && i < len(removeKeys); j++ { + key := removeKeys[i] + runningGroup.Go(func() error { + err := mcm.removeObject(groupCtx, bucketName, key) + if err != nil { + log.Warn("failed to remove object", zap.String("bucket", bucketName), zap.String("path", key), zap.Error(err)) + return err + } + return nil + }) + i++ + } + if err := runningGroup.Wait(); err != nil { + return err + } + } + return nil +} + +// ListWithPrefix returns objects with provided prefix. 
+func (mcm *AzureChunkManager) ListWithPrefix(ctx context.Context, bucketName string, prefix string, recursive bool) ([]string, []int64, error) { + objects, err := mcm.listObjects(ctx, bucketName, prefix, false) + if err != nil { + return nil, nil, err + } + if recursive { + var objectsKeys []string + var sizes []int64 + for object, contentLength := range objects { + objectsKeys = append(objectsKeys, object) + sizes = append(sizes, contentLength) + } + return objectsKeys, sizes, nil + } else { + var objectsKeys []string + var sizes []int64 + objectsKeysDict := make(map[string]bool, 0) + for object, _ := range objects { + keyWithoutPrefix := strings.Replace(object, prefix, "", 1) + if strings.Contains(keyWithoutPrefix, "/") { + var key string + if strings.HasPrefix(keyWithoutPrefix, "/") { + key = prefix + "/" + strings.Split(keyWithoutPrefix, "/")[1] + "/" + } else { + key = prefix + strings.Split(keyWithoutPrefix, "/")[0] + "/" + } + if _, exist := objectsKeysDict[key]; !exist { + objectsKeys = append(objectsKeys, key) + sizes = append(sizes, 0) + objectsKeysDict[key] = true + } + } else { + key := prefix + keyWithoutPrefix + if _, exist := objectsKeysDict[key]; !exist { + objectsKeys = append(objectsKeys, key) + sizes = append(sizes, 0) + objectsKeysDict[key] = true + } + } + } + return objectsKeys, sizes, nil + } + + //var objectsKeys []string + //var sizes []int64 + //tasks := list.New() + //tasks.PushBack(prefix) + //for tasks.Len() > 0 { + // e := tasks.Front() + // pre := e.Value.(string) + // tasks.Remove(e) + // + // // TODO add concurrent call if performance matters + // // only return current level per call + // objects, err := mcm.listObjects(ctx, bucketName, pre, false) + // if err != nil { + // return nil, nil, err + // } + // + // for object, contentLength := range objects { + // // with tailing "/", object is a "directory" + // if strings.HasSuffix(object, "/") && recursive { + // // enqueue when recursive is true + // if object != pre { + // tasks.PushBack(object) + // } + // continue + // } + // objectsKeys = append(objectsKeys, object) + // sizes = append(sizes, contentLength) + // } + //} + // + //return objectsKeys, sizes, nil +} + +func (mcm *AzureChunkManager) getObject(ctx context.Context, bucketName, objectName string, offset int64, size int64) (FileReader, error) { + //resp, err := mcm.cli.DownloadStream(ctx, bucketName, objectName, nil) + //if err != nil { + // return nil, fmt.Errorf("storage: azure download stream %w", err) + //} + //return resp.Body, nil + + reader, err := mcm.client.GetObject(ctx, bucketName, objectName, offset, size) + switch err := err.(type) { + case *azcore.ResponseError: + if err.ErrorCode == string(bloberror.BlobNotFound) { + return nil, WrapErrNoSuchKey(objectName) + } + //case minio.ErrorResponse: + // if err.Code == "NoSuchKey" { + // return nil, WrapErrNoSuchKey(objectName) + // } + } + return reader, err +} + +func (mcm *AzureChunkManager) putObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64) error { + err := mcm.client.PutObject(ctx, bucketName, objectName, reader, objectSize) + return err +} + +func (mcm *AzureChunkManager) getObjectSize(ctx context.Context, bucketName, objectName string) (int64, error) { + info, err := mcm.client.StatObject(ctx, bucketName, objectName) + + switch err := err.(type) { + case *azcore.ResponseError: + if err.ErrorCode == string(bloberror.BlobNotFound) { + return info, WrapErrNoSuchKey(objectName) + } + //case minio.ErrorResponse: + // if err.Code == 
"NoSuchKey" { + // return nil, WrapErrNoSuchKey(objectName) + // } + } + + return info, err +} + +func (mcm *AzureChunkManager) listObjects(ctx context.Context, bucketName string, prefix string, recursive bool) (map[string]int64, error) { + res, err := mcm.client.ListObjects(ctx, bucketName, prefix, recursive) + return res, err +} + +func (mcm *AzureChunkManager) removeObject(ctx context.Context, bucketName, objectName string) error { + err := mcm.client.RemoveObject(ctx, bucketName, objectName) + return err +} diff --git a/core/storage/azure_object_storage.go b/core/storage/azure_object_storage.go new file mode 100644 index 00000000..6bed8124 --- /dev/null +++ b/core/storage/azure_object_storage.go @@ -0,0 +1,169 @@ +// Licensed to the LF AI & Data foundation under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "fmt" + "io" + "os" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" + + "github.com/zilliztech/milvus-backup/internal/util/retry" +) + +type AzureObjectStorage struct { + Client *service.Client + config *config +} + +type AzureClient struct { + cli *azblob.Client +} + +func NewAzureClient(ctx context.Context, cfg *config) (*azblob.Client, error) { + cred, err := azblob.NewSharedKeyCredential(cfg.accessKeyID, cfg.secretAccessKeyID) + if err != nil { + return nil, fmt.Errorf("storage: new azure shared key credential %w", err) + } + endpoint := fmt.Sprintf("https://%s.blob.core.windows.net", cfg.accessKeyID) + cli, err := azblob.NewClientWithSharedKeyCredential(endpoint, cred, nil) + if err != nil { + return nil, fmt.Errorf("storage: new azure client %w", err) + } + + return cli, nil +} + +func newAzureObjectStorageWithConfig(ctx context.Context, c *config) (*AzureObjectStorage, error) { + var client *service.Client + var err error + if c.useIAM { + cred, credErr := azidentity.NewWorkloadIdentityCredential(&azidentity.WorkloadIdentityCredentialOptions{ + ClientID: os.Getenv("AZURE_CLIENT_ID"), + TenantID: os.Getenv("AZURE_TENANT_ID"), + TokenFilePath: os.Getenv("AZURE_FEDERATED_TOKEN_FILE"), + }) + if credErr != nil { + return nil, credErr + } + client, err = service.NewClient("https://"+c.accessKeyID+".blob."+c.address+"/", cred, &service.ClientOptions{}) + } else { + connectionString := os.Getenv("AZURE_STORAGE_CONNECTION_STRING") + if connectionString == "" { + connectionString = "DefaultEndpointsProtocol=https;AccountName=" + c.accessKeyID + + ";AccountKey=" + c.secretAccessKeyID + 
";EndpointSuffix=" + c.address + } + client, err = service.NewClientFromConnectionString(connectionString, &service.ClientOptions{}) + } + if err != nil { + return nil, err + } + if c.bucketName == "" { + return nil, fmt.Errorf("invalid bucket name") + } + // check valid in first query + checkBucketFn := func() error { + _, err := client.NewContainerClient(c.bucketName).GetProperties(ctx, &container.GetPropertiesOptions{}) + if err != nil { + switch err := err.(type) { + case *azcore.ResponseError: + if c.createBucket && err.ErrorCode == string(bloberror.ContainerNotFound) { + _, createErr := client.NewContainerClient(c.bucketName).Create(ctx, &azblob.CreateContainerOptions{}) + if createErr != nil { + return createErr + } + return nil + } + } + } + return err + } + err = retry.Do(ctx, checkBucketFn, retry.Attempts(CheckBucketRetryAttempts)) + if err != nil { + return nil, err + } + return &AzureObjectStorage{Client: client, config: c}, nil +} + +func (aos *AzureObjectStorage) GetObject(ctx context.Context, bucketName, objectName string, offset int64, size int64) (FileReader, error) { + opts := azblob.DownloadStreamOptions{} + if offset > 0 { + opts.Range = azblob.HTTPRange{ + Offset: offset, + Count: size, + } + } + object, err := aos.Client.NewContainerClient(bucketName).NewBlockBlobClient(objectName).DownloadStream(ctx, &opts) + + if err != nil { + return nil, err + } + return object.Body, nil +} + +func (aos *AzureObjectStorage) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64) error { + _, err := aos.Client.NewContainerClient(bucketName).NewBlockBlobClient(objectName).UploadStream(ctx, reader, &azblob.UploadStreamOptions{}) + return err +} + +func (aos *AzureObjectStorage) StatObject(ctx context.Context, bucketName, objectName string) (int64, error) { + info, err := aos.Client.NewContainerClient(bucketName).NewBlockBlobClient(objectName).GetProperties(ctx, &blob.GetPropertiesOptions{}) + if err != nil { + return 0, err + } + return *info.ContentLength, nil +} + +func (aos *AzureObjectStorage) ListObjects(ctx context.Context, bucketName string, prefix string, recursive bool) (map[string]int64, error) { + pager := aos.Client.NewContainerClient(bucketName).NewListBlobsFlatPager(&azblob.ListBlobsFlatOptions{ + Prefix: &prefix, + }) + // pager := aos.Client.NewContainerClient(bucketName).NewListBlobsHierarchyPager("/", &container.ListBlobsHierarchyOptions{ + // Prefix: &prefix, + // }) + + objects := map[string]int64{} + if pager.More() { + pageResp, err := pager.NextPage(context.Background()) + if err != nil { + return nil, err + } + for _, blob := range pageResp.Segment.BlobItems { + objects[*blob.Name] = *blob.Properties.ContentLength + } + } + return objects, nil +} + +func (aos *AzureObjectStorage) RemoveObject(ctx context.Context, bucketName, objectName string) error { + _, err := aos.Client.NewContainerClient(bucketName).NewBlockBlobClient(objectName).Delete(ctx, &blob.DeleteOptions{}) + return err +} + +func (aos *AzureObjectStorage) CopyObject(ctx context.Context, fromBucketName, toBucketName, fromPath, toPath string) error { + fromPathUrl := fmt.Sprintf("https://%s.blob.core.windows.net/%s/%s", aos.config.accessKeyID, fromBucketName, fromPath) + _, err := aos.Client.NewContainerClient(toBucketName).NewBlockBlobClient(toPath).StartCopyFromURL(ctx, fromPathUrl, nil) + return err +} diff --git a/core/storage/chunk_manager.go b/core/storage/chunk_manager.go new file mode 100644 index 00000000..cbd6158d --- /dev/null +++ 
b/core/storage/chunk_manager.go @@ -0,0 +1,62 @@ +package storage + +import ( + "context" + + "github.com/zilliztech/milvus-backup/core/paramtable" +) + +func NewChunkManager(ctx context.Context, params paramtable.BackupParams) (ChunkManager, error) { + engine := params.MinioCfg.CloudProvider + if engine == "azure" { + return newAzureChunkManagerWithParams(ctx, params) + } else { + return newMinioChunkManagerWithParams(ctx, params) + } + //switch engine { + //case "local": + // return newMinioChunkManagerWithParams(ctx, params) + // //return NewLocalChunkManager(RootPath(f.config.rootPath)), nil + //case "minio": + //case "s3": + //case "gcp": + //case "aliyun": + // return newMinioChunkManagerWithParams(ctx, params) + //case "azure": + // return newAzureChunkManagerWithParams(ctx, params) + //default: + // return nil, errors.New("no chunk manager implemented with engine: " + engine) + //} +} + +func newMinioChunkManagerWithParams(ctx context.Context, params paramtable.BackupParams) (*MinioChunkManager, error) { + c := newDefaultConfig() + c.address = params.MinioCfg.Address + ":" + params.MinioCfg.Port + c.accessKeyID = params.MinioCfg.AccessKeyID + c.secretAccessKeyID = params.MinioCfg.SecretAccessKey + c.useSSL = params.MinioCfg.UseSSL + c.bucketName = params.MinioCfg.BackupBucketName + c.rootPath = params.MinioCfg.RootPath + c.cloudProvider = params.MinioCfg.CloudProvider + c.storageEngine = params.MinioCfg.StorageType + c.useIAM = params.MinioCfg.UseIAM + c.iamEndpoint = params.MinioCfg.IAMEndpoint + c.createBucket = true + return newMinioChunkManagerWithConfig(ctx, c) +} + +func newAzureChunkManagerWithParams(ctx context.Context, params paramtable.BackupParams) (*AzureChunkManager, error) { + c := newDefaultConfig() + c.address = params.MinioCfg.Address + ":" + params.MinioCfg.Port + c.accessKeyID = params.MinioCfg.AccessKeyID + c.secretAccessKeyID = params.MinioCfg.SecretAccessKey + c.useSSL = params.MinioCfg.UseSSL + c.bucketName = params.MinioCfg.BackupBucketName + c.rootPath = params.MinioCfg.RootPath + c.cloudProvider = params.MinioCfg.CloudProvider + c.storageEngine = params.MinioCfg.StorageType + c.useIAM = params.MinioCfg.UseIAM + c.iamEndpoint = params.MinioCfg.IAMEndpoint + c.createBucket = true + return NewAzureChunkManager(ctx, c) +} diff --git a/core/storage/minio_chunk_manager.go b/core/storage/minio_chunk_manager.go index fbac42dc..fb1da8d6 100644 --- a/core/storage/minio_chunk_manager.go +++ b/core/storage/minio_chunk_manager.go @@ -6,10 +6,12 @@ import ( "errors" "fmt" "github.com/zilliztech/milvus-backup/core/paramtable" + "github.com/zilliztech/milvus-backup/core/storage/aliyun" "github.com/zilliztech/milvus-backup/core/storage/gcp" "github.com/zilliztech/milvus-backup/internal/log" "github.com/zilliztech/milvus-backup/internal/util/errorutil" "github.com/zilliztech/milvus-backup/internal/util/retry" + "golang.org/x/sync/errgroup" "io" "strings" @@ -19,6 +21,8 @@ import ( "golang.org/x/exp/mmap" ) +const NoSuchKey = "NoSuchKey" + var ( ErrNoSuchKey = errors.New("NoSuchKey") ) @@ -27,6 +31,10 @@ func WrapErrNoSuchKey(key string) error { return fmt.Errorf("%w(key=%s)", ErrNoSuchKey, key) } +func IsErrNoSuchKey(err error) bool { + return strings.HasPrefix(err.Error(), NoSuchKey) +} + var CheckBucketRetryAttempts uint = 20 // MinioChunkManager is responsible for read and write data stored in minio. 
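The IsErrNoSuchKey helper added above gives callers a uniform way to tell a missing object apart from a genuine storage failure. A minimal sketch of a hypothetical caller (not part of this diff; it assumes the ChunkManager interface exposes Read, as both implementations do, and that "github.com/zilliztech/milvus-backup/core/storage" is imported as storage):

func readIfPresent(ctx context.Context, cm storage.ChunkManager, bucket, key string) ([]byte, error) {
	data, err := cm.Read(ctx, bucket, key)
	if err != nil {
		if storage.IsErrNoSuchKey(err) {
			// The object simply does not exist; report "no data" instead of an error.
			return nil, nil
		}
		// A genuine storage failure (network, auth, etc.) is propagated as-is.
		return nil, err
	}
	return data, nil
}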
@@ -54,8 +62,17 @@ func NewMinioChunkManager(ctx context.Context, opts ...Option) (*MinioChunkManag func newMinioChunkManagerWithConfig(ctx context.Context, c *config) (*MinioChunkManager, error) { var creds *credentials.Credentials var newMinioFn = minio.New + var bucketLookupType = minio.BucketLookupAuto switch c.cloudProvider { + case paramtable.CloudProviderAliyun: + // auto doesn't work for aliyun, so we set to dns deliberately + bucketLookupType = minio.BucketLookupDNS + if c.useIAM { + newMinioFn = aliyun.NewMinioClient + } else { + creds = credentials.NewStaticV4(c.accessKeyID, c.secretAccessKeyID, "") + } case paramtable.CloudProviderGCP: newMinioFn = gcp.NewMinioClient if !c.useIAM { @@ -69,8 +86,9 @@ func newMinioChunkManagerWithConfig(ctx context.Context, c *config) (*MinioChunk } } minioOpts := &minio.Options{ - Creds: creds, - Secure: c.useSSL, + BucketLookup: bucketLookupType, + Creds: creds, + Secure: c.useSSL, } minIOClient, err := newMinioFn(c.address, minioOpts) // options nil or invalid formatted endpoint, don't need to retry @@ -328,11 +346,32 @@ func (mcm *MinioChunkManager) MultiRemove(ctx context.Context, bucketName string // RemoveWithPrefix removes all objects with the same prefix @prefix from minio. func (mcm *MinioChunkManager) RemoveWithPrefix(ctx context.Context, bucketName string, prefix string) error { - objects := mcm.Client.ListObjects(ctx, mcm.bucketName, minio.ListObjectsOptions{Prefix: prefix, Recursive: true}) - for rErr := range mcm.Client.RemoveObjects(ctx, bucketName, objects, minio.RemoveObjectsOptions{GovernanceBypass: false}) { - if rErr.Err != nil { - log.Warn("failed to remove objects", zap.String("prefix", prefix), zap.Error(rErr.Err)) - return rErr.Err + objects := mcm.Client.ListObjects(ctx, bucketName, minio.ListObjectsOptions{Prefix: prefix, Recursive: true}) + i := 0 + maxGoroutine := 10 + removeKeys := make([]string, 0, len(objects)) + for object := range objects { + if object.Err != nil { + return object.Err + } + removeKeys = append(removeKeys, object.Key) + } + for i < len(removeKeys) { + runningGroup, groupCtx := errgroup.WithContext(ctx) + for j := 0; j < maxGoroutine && i < len(removeKeys); j++ { + key := removeKeys[i] + runningGroup.Go(func() error { + err := mcm.Client.RemoveObject(groupCtx, bucketName, key, minio.RemoveObjectOptions{}) + if err != nil { + log.Warn("failed to remove object", zap.String("path", key), zap.Error(err)) + return err + } + return nil + }) + i++ + } + if err := runningGroup.Wait(); err != nil { + return err } } return nil diff --git a/core/storage/options.go b/core/storage/options.go index 8852dd2a..2b99a5db 100644 --- a/core/storage/options.go +++ b/core/storage/options.go @@ -10,8 +10,10 @@ type config struct { createBucket bool rootPath string useIAM bool - cloudProvider string iamEndpoint string + + cloudProvider string + storageEngine string } func newDefaultConfig() *config { @@ -68,12 +70,6 @@ func UseIAM(useIAM bool) Option { } } -func CloudProvider(cloudProvider string) Option { - return func(c *config) { - c.cloudProvider = cloudProvider - } -} - func IAMEndpoint(iamEndpoint string) Option { return func(c *config) { c.iamEndpoint = iamEndpoint diff --git a/core/storage/types.go b/core/storage/types.go index 6255e128..bfc62191 100644 --- a/core/storage/types.go +++ b/core/storage/types.go @@ -36,6 +36,7 @@ type ChunkManager interface { ListWithPrefix(ctx context.Context, bucketName string, prefix string, recursive bool) ([]string, []int64, error) // ReadWithPrefix reads files with same @prefix 
and returns contents. ReadWithPrefix(ctx context.Context, bucketName string, prefix string) ([]string, [][]byte, error) + // Not use Mmap(ctx context.Context, bucketName string, filePath string) (*mmap.ReaderAt, error) // ReadAt reads @filePath by offset @off, content stored in @p, return @n as the number of bytes read. // if all bytes are read, @err is io.EOF. diff --git a/core/utils/backup_request_util.go b/core/utils/backup_request_util.go new file mode 100644 index 00000000..160fed85 --- /dev/null +++ b/core/utils/backup_request_util.go @@ -0,0 +1,57 @@ +package utils + +import ( + "github.com/golang/protobuf/jsonpb" + structpb "github.com/golang/protobuf/ptypes/struct" + "github.com/zilliztech/milvus-backup/core/proto/backuppb" +) + +func GetCreateDBCollections(request *backuppb.CreateBackupRequest) string { + fieldValue := request.GetDbCollections() + if fieldValue == nil { + return "" + } + switch fieldValue.Kind.(type) { + case *structpb.Value_StringValue: + strVal := fieldValue.GetStringValue() + return strVal + case *structpb.Value_StructValue: + jsonStruct := fieldValue.GetStructValue() + jsonStr, err := (&jsonpb.Marshaler{}).MarshalToString(jsonStruct) + if err != nil { + return "" + } + return jsonStr + default: + return "" + } +} + +func GetRestoreDBCollections(request *backuppb.RestoreBackupRequest) string { + fieldValue := request.GetDbCollections() + if fieldValue == nil { + return "" + } + switch fieldValue.Kind.(type) { + case *structpb.Value_StringValue: + strVal := fieldValue.GetStringValue() + return strVal + case *structpb.Value_StructValue: + jsonStruct := fieldValue.GetStructValue() + jsonStr, err := (&jsonpb.Marshaler{}).MarshalToString(jsonStruct) + if err != nil { + return "" + } + return jsonStr + default: + return "" + } +} + +func WrapDBCollections(dbCollections string) *structpb.Value { + return &structpb.Value{ + Kind: &structpb.Value_StringValue{ + StringValue: dbCollections, + }, + } +} diff --git a/core/utils/convert_util.go b/core/utils/convert_util.go index 9f4321e1..3e18ae2c 100644 --- a/core/utils/convert_util.go +++ b/core/utils/convert_util.go @@ -4,13 +4,15 @@ import ( "encoding/json" "errors" "fmt" + "time" "github.com/zilliztech/milvus-backup/core/proto/backuppb" ) const ( - logicalBits = 18 - PARAMS = "params" + logicalBits = 18 + logicalBitsMask = (1 << logicalBits) - 1 + PARAMS = "params" ) // ComposeTS returns a timestamp composed of physical part and logical part @@ -18,6 +20,14 @@ func ComposeTS(physical, logical int64) uint64 { return uint64((physical << logicalBits) + logical) } +// ParseTS returns a timestamp composed of physical part and logical part +func ParseTS(ts uint64) (time.Time, uint64) { + logical := ts & logicalBitsMask + physical := ts >> logicalBits + physicalTime := time.Unix(int64(physical/1000), int64(physical)%1000*time.Millisecond.Nanoseconds()) + return physicalTime, logical +} + // kvPairToMap largely copied from internal/proxy/task.go#parseIndexParams func KVPairToMap(m []*backuppb.KeyValuePair) (map[string]string, error) { params := make(map[string]string) diff --git a/core/utils/convert_util_test.go b/core/utils/convert_util_test.go new file mode 100644 index 00000000..268edda6 --- /dev/null +++ b/core/utils/convert_util_test.go @@ -0,0 +1,13 @@ +package utils + +import ( + "testing" +) + +func TestTs(t *testing.T) { + ts := 443727974068387848 + time, logical := ParseTS(uint64(ts)) + println(time.Unix()) + println(logical) + +} diff --git a/deployment/cluster/docker-compose.yml 
b/deployment/cluster/docker-compose.yml index 32d95c95..1033af41 100644 --- a/deployment/cluster/docker-compose.yml +++ b/deployment/cluster/docker-compose.yml @@ -54,7 +54,7 @@ services: rootcoord: container_name: milvus-rootcoord - image: milvusdb/milvus:2.2.0-latest + image: milvusdb/milvus:master-latest command: ["milvus", "run", "rootcoord"] environment: ETCD_ENDPOINTS: etcd:2379 @@ -71,7 +71,7 @@ services: proxy: container_name: milvus-proxy - image: milvusdb/milvus:2.2.0-latest + image: milvusdb/milvus:master-latest command: ["milvus", "run", "proxy"] environment: ETCD_ENDPOINTS: etcd:2379 @@ -83,7 +83,7 @@ services: querycoord: container_name: milvus-querycoord - image: milvusdb/milvus:2.2.0-latest + image: milvusdb/milvus:master-latest command: ["milvus", "run", "querycoord"] environment: ETCD_ENDPOINTS: etcd:2379 @@ -99,7 +99,7 @@ services: querynode: container_name: milvus-querynode - image: milvusdb/milvus:2.2.0-latest + image: milvusdb/milvus:master-latest command: ["milvus", "run", "querynode"] environment: ETCD_ENDPOINTS: etcd:2379 @@ -110,7 +110,7 @@ services: indexcoord: container_name: milvus-indexcoord - image: milvusdb/milvus:2.2.0-latest + image: milvusdb/milvus:master-latest command: ["milvus", "run", "indexcoord"] environment: ETCD_ENDPOINTS: etcd:2379 @@ -126,7 +126,7 @@ services: indexnode: container_name: milvus-indexnode - image: milvusdb/milvus:2.2.0-latest + image: milvusdb/milvus:master-latest command: ["milvus", "run", "indexnode"] environment: ETCD_ENDPOINTS: etcd:2379 @@ -138,7 +138,7 @@ services: datacoord: container_name: milvus-datacoord - image: milvusdb/milvus:2.2.0-latest + image: milvusdb/milvus:master-latest command: ["milvus", "run", "datacoord"] environment: ETCD_ENDPOINTS: etcd:2379 @@ -154,7 +154,7 @@ services: datanode: container_name: milvus-datanode - image: milvusdb/milvus:2.2.0-latest + image: milvusdb/milvus:master-latest command: ["milvus", "run", "datanode"] environment: ETCD_ENDPOINTS: etcd:2379 @@ -168,7 +168,7 @@ services: # ---------------------------------------------------------------------------------------- # datanode-1: # container_name: milvus-datanode-1 -# image: milvusdb/milvus:2.2.0-latest +# image: milvusdb/milvus:master-latest # command: ["milvus", "run", "datanode"] # environment: # ETCD_ENDPOINTS: etcd:2379 @@ -179,7 +179,7 @@ services: # # datanode-2: # container_name: milvus-datanode-2 -# image: milvusdb/milvus:2.2.0-latest +# image: milvusdb/milvus:master-latest # command: ["milvus", "run", "datanode"] # environment: # ETCD_ENDPOINTS: etcd:2379 @@ -190,7 +190,7 @@ services: # # datanode-3: # container_name: milvus-datanode-3 -# image: milvusdb/milvus:2.2.0-latest +# image: milvusdb/milvus:master-latest # command: ["milvus", "run", "datanode"] # environment: # ETCD_ENDPOINTS: etcd:2379 diff --git a/deployment/cluster/values.yaml b/deployment/cluster/values.yaml index c1737fbb..483730d4 100644 --- a/deployment/cluster/values.yaml +++ b/deployment/cluster/values.yaml @@ -3,7 +3,7 @@ cluster: image: all: repository: milvusdb/milvus - tag: 2.2.0-latest + tag: master-latest pullPolicy: IfNotPresent etcd: diff --git a/deployment/standalone/docker-compose.yml b/deployment/standalone/docker-compose.yml index 84b733f0..b2b288cc 100644 --- a/deployment/standalone/docker-compose.yml +++ b/deployment/standalone/docker-compose.yml @@ -32,7 +32,7 @@ services: standalone: container_name: milvus-standalone - image: milvusdb/milvus:2.2.0-latest + image: milvusdb/milvus:master-latest command: ["milvus", "run", "standalone"] environment: 
ETCD_ENDPOINTS: etcd:2379 diff --git a/deployment/standalone/values.yaml b/deployment/standalone/values.yaml index fb286de6..5e546eae 100644 --- a/deployment/standalone/values.yaml +++ b/deployment/standalone/values.yaml @@ -3,7 +3,7 @@ cluster: image: all: repository: milvusdb/milvus - tag: 2.2.0-latest + tag: master-latest pullPolicy: IfNotPresent kafka: diff --git a/docs/docs.go b/docs/docs.go index c5ed2a00..747a2956 100644 --- a/docs/docs.go +++ b/docs/docs.go @@ -446,6 +446,9 @@ const docTemplate = `{ "description": { "type": "string" }, + "enable_dynamic_field": { + "type": "boolean" + }, "fields": { "type": "array", "items": { @@ -492,6 +495,14 @@ const docTemplate = `{ "type": "string" } }, + "db_collections": { + "description": "database and collections to backup. A json string. To support database. 2023.7.7", + "type": "string" + }, + "force": { + "description": "force backup skip flush, Should make sure data has been stored into disk when using it", + "type": "boolean" + }, "requestId": { "description": "uuid of request, will generate one if not set", "type": "string" @@ -573,9 +584,15 @@ const docTemplate = `{ "data_type": { "$ref": "#/definitions/backuppb.DataType" }, + "default_value": { + "$ref": "#/definitions/backuppb.ValueField" + }, "description": { "type": "string" }, + "element_type": { + "$ref": "#/definitions/backuppb.DataType" + }, "fieldID": { "type": "integer" }, @@ -585,6 +602,12 @@ const docTemplate = `{ "$ref": "#/definitions/backuppb.KeyValuePair" } }, + "is_dynamic": { + "type": "boolean" + }, + "is_partition_key": { + "type": "boolean" + }, "is_primary_key": { "type": "boolean" }, @@ -755,6 +778,14 @@ const docTemplate = `{ "description": "Support two ways to rename the collections while recover\n1, set a suffix", "type": "string" }, + "db_collections": { + "description": "database and collections to restore. A json string. To support database. 2023.7.7", + "type": "string" + }, + "metaOnly": { + "description": "if true only restore meta", + "type": "boolean" + }, "path": { "description": "if bucket_name and path is set. 
will override bucket/path in config.", "type": "string" @@ -762,6 +793,10 @@ const docTemplate = `{ "requestId": { "description": "uuid of request, will generate one if not set", "type": "string" + }, + "restoreIndex": { + "description": "if true restore index info", + "type": "boolean" } } }, @@ -844,6 +879,10 @@ const docTemplate = `{ "id": { "type": "string" }, + "metaOnly": { + "description": "if true only restore meta", + "type": "boolean" + }, "partition_restore_tasks": { "type": "array", "items": { @@ -853,6 +892,10 @@ const docTemplate = `{ "progress": { "type": "integer" }, + "restoreIndex": { + "description": "if true restore index info", + "type": "boolean" + }, "restored_size": { "type": "integer" }, @@ -865,6 +908,9 @@ const docTemplate = `{ "target_collection_name": { "type": "string" }, + "target_db_name": { + "type": "string" + }, "to_restore_size": { "type": "integer" } @@ -954,6 +1000,14 @@ const docTemplate = `{ } } } + }, + "backuppb.ValueField": { + "type": "object", + "properties": { + "data": { + "description": "Types that are valid to be assigned to Data:\n\t*ValueField_BoolData\n\t*ValueField_IntData\n\t*ValueField_LongData\n\t*ValueField_FloatData\n\t*ValueField_DoubleData\n\t*ValueField_StringData\n\t*ValueField_BytesData" + } + } } } }` diff --git a/docs/swagger.json b/docs/swagger.json index dd21d4e0..a3f4ac77 100644 --- a/docs/swagger.json +++ b/docs/swagger.json @@ -438,6 +438,9 @@ "description": { "type": "string" }, + "enable_dynamic_field": { + "type": "boolean" + }, "fields": { "type": "array", "items": { @@ -484,6 +487,14 @@ "type": "string" } }, + "db_collections": { + "description": "database and collections to backup. A json string. To support database. 2023.7.7", + "type": "string" + }, + "force": { + "description": "force backup skip flush, Should make sure data has been stored into disk when using it", + "type": "boolean" + }, "requestId": { "description": "uuid of request, will generate one if not set", "type": "string" @@ -565,9 +576,15 @@ "data_type": { "$ref": "#/definitions/backuppb.DataType" }, + "default_value": { + "$ref": "#/definitions/backuppb.ValueField" + }, "description": { "type": "string" }, + "element_type": { + "$ref": "#/definitions/backuppb.DataType" + }, "fieldID": { "type": "integer" }, @@ -577,6 +594,12 @@ "$ref": "#/definitions/backuppb.KeyValuePair" } }, + "is_dynamic": { + "type": "boolean" + }, + "is_partition_key": { + "type": "boolean" + }, "is_primary_key": { "type": "boolean" }, @@ -747,6 +770,14 @@ "description": "Support two ways to rename the collections while recover\n1, set a suffix", "type": "string" }, + "db_collections": { + "description": "database and collections to restore. A json string. To support database. 2023.7.7", + "type": "string" + }, + "metaOnly": { + "description": "if true only restore meta", + "type": "boolean" + }, "path": { "description": "if bucket_name and path is set. 
will override bucket/path in config.", "type": "string" @@ -754,6 +785,10 @@ "requestId": { "description": "uuid of request, will generate one if not set", "type": "string" + }, + "restoreIndex": { + "description": "if true restore index info", + "type": "boolean" } } }, @@ -836,6 +871,10 @@ "id": { "type": "string" }, + "metaOnly": { + "description": "if true only restore meta", + "type": "boolean" + }, "partition_restore_tasks": { "type": "array", "items": { @@ -845,6 +884,10 @@ "progress": { "type": "integer" }, + "restoreIndex": { + "description": "if true restore index info", + "type": "boolean" + }, "restored_size": { "type": "integer" }, @@ -857,6 +900,9 @@ "target_collection_name": { "type": "string" }, + "target_db_name": { + "type": "string" + }, "to_restore_size": { "type": "integer" } @@ -946,6 +992,14 @@ } } } + }, + "backuppb.ValueField": { + "type": "object", + "properties": { + "data": { + "description": "Types that are valid to be assigned to Data:\n\t*ValueField_BoolData\n\t*ValueField_IntData\n\t*ValueField_LongData\n\t*ValueField_FloatData\n\t*ValueField_DoubleData\n\t*ValueField_StringData\n\t*ValueField_BytesData" + } + } } } } \ No newline at end of file diff --git a/docs/swagger.yaml b/docs/swagger.yaml index 106e6324..4d182a38 100644 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -126,6 +126,8 @@ definitions: type: boolean description: type: string + enable_dynamic_field: + type: boolean fields: items: $ref: '#/definitions/backuppb.FieldSchema' @@ -160,6 +162,14 @@ definitions: items: type: string type: array + db_collections: + description: database and collections to backup. A json string. To support + database. 2023.7.7 + type: string + force: + description: force backup skip flush, Should make sure data has been stored + into disk when using it + type: boolean requestId: description: uuid of request, will generate one if not set type: string @@ -220,14 +230,22 @@ definitions: type: boolean data_type: $ref: '#/definitions/backuppb.DataType' + default_value: + $ref: '#/definitions/backuppb.ValueField' description: type: string + element_type: + $ref: '#/definitions/backuppb.DataType' fieldID: type: integer index_params: items: $ref: '#/definitions/backuppb.KeyValuePair' type: array + is_dynamic: + type: boolean + is_partition_key: + type: boolean is_primary_key: type: boolean name: @@ -352,6 +370,13 @@ definitions: Support two ways to rename the collections while recover 1, set a suffix type: string + db_collections: + description: database and collections to restore. A json string. To support + database. 2023.7.7 + type: string + metaOnly: + description: if true only restore meta + type: boolean path: description: if bucket_name and path is set. will override bucket/path in config. 
@@ -359,6 +384,9 @@ definitions: requestId: description: uuid of request, will generate one if not set type: string + restoreIndex: + description: if true restore index info + type: boolean type: object backuppb.RestoreBackupResponse: properties: @@ -410,12 +438,18 @@ definitions: type: string id: type: string + metaOnly: + description: if true only restore meta + type: boolean partition_restore_tasks: items: $ref: '#/definitions/backuppb.RestorePartitionTask' type: array progress: type: integer + restoreIndex: + description: if true restore index info + type: boolean restored_size: type: integer start_time: @@ -424,6 +458,8 @@ definitions: $ref: '#/definitions/backuppb.RestoreTaskStateCode' target_collection_name: type: string + target_db_name: + type: string to_restore_size: type: integer type: object @@ -489,6 +525,11 @@ definitions: $ref: '#/definitions/backuppb.FieldBinlog' type: array type: object + backuppb.ValueField: + properties: + data: + description: "Types that are valid to be assigned to Data:\n\t*ValueField_BoolData\n\t*ValueField_IntData\n\t*ValueField_LongData\n\t*ValueField_FloatData\n\t*ValueField_DoubleData\n\t*ValueField_StringData\n\t*ValueField_BytesData" + type: object info: contact: email: wayasxxx@gmail.com diff --git a/example/db_support/prepare_data.py b/example/db_support/prepare_data.py index 75b0cf1e..9f8e8178 100644 --- a/example/db_support/prepare_data.py +++ b/example/db_support/prepare_data.py @@ -127,7 +127,7 @@ insert_result2 = hello_milvus2.insert(entities2) hello_milvus2.flush() -index_params2 = {"index_type": "TRIE"} +index_params2 = {"index_type": "Trie"} hello_milvus2.create_index("var", index_params2) print(f"Number of entities in hello_milvus2: {hello_milvus2.num_entities}") # check the num_entites diff --git a/example/delete_support/prepare_data.py b/example/delete_support/prepare_data.py new file mode 100644 index 00000000..cb1e8421 --- /dev/null +++ b/example/delete_support/prepare_data.py @@ -0,0 +1,137 @@ +# hello_milvus.py demonstrates the basic operations of PyMilvus, a Python SDK of Milvus. +# 1. connect to Milvus +# 2. create collection +# 3. insert data +# 4. create index +# 5. search, query, and hybrid search on entities +# 6. delete entities by PK +# 7. drop collection +import time +import os +import numpy as np +from pymilvus import ( + connections, + utility, + FieldSchema, CollectionSchema, DataType, + Collection, +) + +fmt = "\n=== {:30} ===\n" +search_latency_fmt = "search latency = {:.4f}s" +num_entities, dim = 3000, 8 + +################################################################################# +# 1. connect to Milvus +# Add a new connection alias `default` for Milvus server in `localhost:19530` +# Actually the "default" alias is a buildin in PyMilvus. +# If the address of Milvus is the same as `localhost:19530`, you can omit all +# parameters and call the method as: `connections.connect()`. +# +# Note: the `using` parameter of the following methods is default to "default". +print(fmt.format("start connecting to Milvus")) + +host = os.environ.get('MILVUS_HOST') +if host == None: + host = "localhost" +print(fmt.format(f"Milvus host: {host}")) +connections.connect("default", host=host, port="19530") + +has = utility.has_collection("hello_milvus") +print(f"Does collection hello_milvus exist in Milvus: {has}") + +################################################################################# +# 2. create collection +# We're going to create a collection with 3 fields. 
+# +-+------------+------------+------------------+------------------------------+ +# | | field name | field type | other attributes | field description | +# +-+------------+------------+------------------+------------------------------+ +# |1| "pk" | Int64 | is_primary=True | "primary field" | +# | | | | auto_id=False | | +# +-+------------+------------+------------------+------------------------------+ +# |2| "random" | Double | | "a double field" | +# +-+------------+------------+------------------+------------------------------+ +# |3|"embeddings"| FloatVector| dim=8 | "float vector with dim 8" | +# +-+------------+------------+------------------+------------------------------+ +fields = [ + FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=False), + FieldSchema(name="random", dtype=DataType.DOUBLE), + FieldSchema(name="var", dtype=DataType.VARCHAR, max_length=65535), + FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=dim) +] + +schema = CollectionSchema(fields, "hello_milvus") + +print(fmt.format("Create collection `hello_milvus`")) +hello_milvus = Collection("hello_milvus", schema, consistency_level="Strong") + +################################################################################ +# 3. insert data +# We are going to insert 3000 rows of data into `hello_milvus` +# Data to be inserted must be organized in fields. +# +# The insert() method returns: +# - either automatically generated primary keys by Milvus if auto_id=True in the schema; +# - or the existing primary key field from the entities if auto_id=False in the schema. + +print(fmt.format("Start inserting entities")) +rng = np.random.default_rng(seed=19530) +entities = [ + # provide the pk field because `auto_id` is set to False + [i for i in range(num_entities)], + rng.random(num_entities).tolist(), # field random, only supports list + [str(i) for i in range(num_entities)], + rng.random((num_entities, dim)), # field embeddings, supports numpy.ndarray and list +] + +insert_result = hello_milvus.insert(entities) +hello_milvus.flush() + +hello_milvus.delete("pk in [0,1,2,3,4]") +hello_milvus.flush() + +print(f"Number of entities in hello_milvus: {hello_milvus.num_entities}") # check the num_entites + +# create another collection +fields2 = [ + FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=False), + FieldSchema(name="random", dtype=DataType.DOUBLE), + FieldSchema(name="var", dtype=DataType.VARCHAR, max_length=65535), + FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=dim) +] + +schema2 = CollectionSchema(fields2, "hello_milvus2") + +print(fmt.format("Create collection `hello_milvus2`")) +hello_milvus2 = Collection("hello_milvus2", schema2, consistency_level="Strong") + +entities2 = [ + [i for i in range(num_entities)], + rng.random(num_entities).tolist(), # field random, only supports list + [str(i) for i in range(num_entities)], + rng.random((num_entities, dim)), # field embeddings, supports numpy.ndarray and list +] + +insert_result2 = hello_milvus2.insert(entities2) +hello_milvus.delete("pk in [0,1,2,3,4]") +hello_milvus2.flush() + +entities3 = [ + [i + num_entities for i in range(num_entities)], + rng.random(num_entities).tolist(), # field random, only supports list + [str(i) for i in range(num_entities)], + rng.random((num_entities, dim)), # field embeddings, supports numpy.ndarray and list +] + +insert_result2 = hello_milvus2.insert(entities3) +hello_milvus.delete("pk in [5,6,7,8,9]") +hello_milvus.delete("pk in [3000,3001,3002,3003,3004]") 
+hello_milvus2.flush() + +index_params = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"} +hello_milvus.create_index("embeddings", index_params) + +index_params2 = {"index_type": "Trie"} +hello_milvus2.create_index("var", index_params2) + +print(f"Number of entities in hello_milvus2: {hello_milvus2.num_entities}") # check the num_entites + diff --git a/example/partition_data/clean_data.py b/example/partition_data/clean_data.py new file mode 100644 index 00000000..41c38ad4 --- /dev/null +++ b/example/partition_data/clean_data.py @@ -0,0 +1,20 @@ +import os +from pymilvus import ( + connections, + utility, +) + +fmt = "\n=== {:30} ===\n" + +print(fmt.format("start connecting to Milvus")) +host = os.environ.get('MILVUS_HOST') +if host == None: + host = "localhost" +print(fmt.format(f"Milvus host: {host}")) +connections.connect("default", host=host, port="19530") + +print(fmt.format("Drop collection `hello_milvus_part`")) +utility.drop_collection("hello_milvus_part") + +print(fmt.format(f"Drop collection `hello_milvus_part_recover`")) +utility.drop_collection("hello_milvus_part_recover") \ No newline at end of file diff --git a/example/partition_data/prepare_data.py b/example/partition_data/prepare_data.py new file mode 100644 index 00000000..3a000311 --- /dev/null +++ b/example/partition_data/prepare_data.py @@ -0,0 +1,101 @@ +# hello_milvus.py demonstrates the basic operations of PyMilvus, a Python SDK of Milvus. +# 1. connect to Milvus +# 2. create collection +# 3. insert data +# 4. create index +# 5. search, query, and hybrid search on entities +# 6. delete entities by PK +# 7. drop collection +import time +import os +import numpy as np +from pymilvus import ( + connections, + utility, + FieldSchema, CollectionSchema, DataType, + Collection, Partition +) + +fmt = "\n=== {:30} ===\n" +search_latency_fmt = "search latency = {:.4f}s" +dim = 8 + +################################################################################# +# 1. connect to Milvus +# Add a new connection alias `default` for Milvus server in `localhost:19530` +# Actually the "default" alias is a buildin in PyMilvus. +# If the address of Milvus is the same as `localhost:19530`, you can omit all +# parameters and call the method as: `connections.connect()`. +# +# Note: the `using` parameter of the following methods is default to "default". +print(fmt.format("start connecting to Milvus")) + +host = os.environ.get('MILVUS_HOST') +if host == None: + host = "localhost" +print(fmt.format(f"Milvus host: {host}")) +connections.connect("default", host=host, port="19530") + +collection_name = "hello_milvus_part" +has = utility.has_collection(collection_name) +print(f"Does collection hello_milvus_part exist in Milvus: {has}") + +################################################################################# +# 2. create collection +# We're going to create a collection with 3 fields. 
+# +-+------------+------------+------------------+------------------------------+ +# | | field name | field type | other attributes | field description | +# +-+------------+------------+------------------+------------------------------+ +# |1| "pk" | Int64 | is_primary=True | "primary field" | +# | | | | auto_id=False | | +# +-+------------+------------+------------------+------------------------------+ +# |2| "random" | Double | | "a double field" | +# +-+------------+------------+------------------+------------------------------+ +# |3|"embeddings"| FloatVector| dim=8 | "float vector with dim 8" | +# +-+------------+------------+------------------+------------------------------+ +fields = [ + FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=True), + FieldSchema(name="random", dtype=DataType.DOUBLE), + FieldSchema(name="var", dtype=DataType.VARCHAR, max_length=65535), + FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=dim) +] + +schema = CollectionSchema(fields, collection_name) + +print(fmt.format(f"Create collection {collection_name}")) +hello_milvus = Collection(collection_name, schema, consistency_level="Strong") +part1 = Partition(collection_name, "part1") +part2 = Partition(collection_name, "part2") + +################################################################################ +# 3. insert data +# We are going to insert 3000 rows of data into `hello_milvus` +# Data to be inserted must be organized in fields. +# +# The insert() method returns: +# - either automatically generated primary keys by Milvus if auto_id=True in the schema; +# - or the existing primary key field from the entities if auto_id=False in the schema. + +print(fmt.format("Start inserting entities")) +rng = np.random.default_rng(seed=19530) + +num_entities = 3000 +entities = [ + # [i for i in range(num_entities)], + rng.random(num_entities).tolist(), # field random, only supports list + [str(i) for i in range(num_entities)], + rng.random((num_entities, dim)), # field embeddings, supports numpy.ndarray and list +] + +num_entities = 6000 +entities2 = [ + # [i for i in range(num_entities)], + rng.random(num_entities).tolist(), # field random, only supports list + [str(i) for i in range(num_entities)], + rng.random((num_entities, dim)), # field embeddings, supports numpy.ndarray and list +] + +insert_result = part1.insert(entities) +insert_result = part2.insert(entities2) +hello_milvus.flush() +print(f"Number of entities in {collection_name}: {hello_milvus.num_entities}") # check the num_entites diff --git a/example/partition_data/verify_data.py b/example/partition_data/verify_data.py new file mode 100644 index 00000000..7f6e239b --- /dev/null +++ b/example/partition_data/verify_data.py @@ -0,0 +1,113 @@ +import time +import os +import numpy as np +from pymilvus import ( + connections, + utility, + FieldSchema, CollectionSchema, DataType, + Collection, +) + +fmt = "\n=== {:30} ===\n" +search_latency_fmt = "search latency = {:.4f}s" +num_entities, dim = 3000, 8 +rng = np.random.default_rng(seed=19530) +entities = [ + # provide the pk field because `auto_id` is set to False + [i for i in range(num_entities)], + rng.random(num_entities).tolist(), # field random, only supports list + rng.random((num_entities, dim)), # field embeddings, supports numpy.ndarray and list +] + +################################################################################ +# 1. 
get recovered collection hello_milvus_recover +print(fmt.format("start connecting to Milvus")) +host = os.environ.get('MILVUS_HOST') +if host == None: + host = "localhost" +print(fmt.format(f"Milvus host: {host}")) +connections.connect("default", host=host, port="19530") + +recover_collections = ["hello_milvus_part_recover"] + +for recover_collection_name in recover_collections: + has = utility.has_collection(recover_collection_name) + print(f"Does collection {recover_collection_name} exist in Milvus: {has}") + recover_collection = Collection(recover_collection_name) + print(recover_collection.schema) + recover_collection.flush() + + print(f"Number of entities in Milvus: {recover_collection_name} : {recover_collection.num_entities}") # check the num_entites + + ################################################################################ + # 4. create index + # We are going to create an IVF_FLAT index for hello_milvus_recover collection. + # create_index() can only be applied to `FloatVector` and `BinaryVector` fields. + print(fmt.format("Start Creating index IVF_FLAT")) + index = { + "index_type": "IVF_FLAT", + "metric_type": "L2", + "params": {"nlist": 128}, + } + + recover_collection.create_index("embeddings", index) + + ################################################################################ + # 5. search, query, and hybrid search + # After data were inserted into Milvus and indexed, you can perform: + # - search based on vector similarity + # - query based on scalar filtering(boolean, int, etc.) + # - hybrid search based on vector similarity and scalar filtering. + # + + # Before conducting a search or a query, you need to load the data in `hello_milvus` into memory. + print(fmt.format("Start loading")) + recover_collection.load() + + # ----------------------------------------------------------------------------- + # search based on vector similarity + print(fmt.format("Start searching based on vector similarity")) + vectors_to_search = entities[-1][-2:] + search_params = { + "metric_type": "L2", + "params": {"nprobe": 10}, + } + + start_time = time.time() + result = recover_collection.search(vectors_to_search, "embeddings", search_params, limit=3, output_fields=["random"]) + end_time = time.time() + + for hits in result: + for hit in hits: + print(f"hit: {hit}, random field: {hit.entity.get('random')}") + print(search_latency_fmt.format(end_time - start_time)) + + # ----------------------------------------------------------------------------- + # query based on scalar filtering(boolean, int, etc.) + print(fmt.format("Start querying with `random > 0.5`")) + + start_time = time.time() + result = recover_collection.query(expr="random > 0.5", output_fields=["random", "embeddings"]) + end_time = time.time() + + print(f"query result:\n-{result[0]}") + print(search_latency_fmt.format(end_time - start_time)) + + # ----------------------------------------------------------------------------- + # hybrid search + print(fmt.format("Start hybrid searching with `random > 0.5`")) + + start_time = time.time() + result = recover_collection.search(vectors_to_search, "embeddings", search_params, limit=3, expr="random > 0.5", output_fields=["random"]) + end_time = time.time() + + for hits in result: + for hit in hits: + print(f"hit: {hit}, random field: {hit.entity.get('random')}") + print(search_latency_fmt.format(end_time - start_time)) + + ############################################################################### + # 7. 
drop collection + # Finally, drop the hello_milvus, hello_milvus_recover collection + # print(fmt.format(f"Drop collection {recover_collection_name}")) + # utility.drop_collection(recover_collection_name) \ No newline at end of file diff --git a/example/partition_key_support/clean_data.py b/example/partition_key_support/clean_data.py index 6b5cc1b2..dee61a4d 100644 --- a/example/partition_key_support/clean_data.py +++ b/example/partition_key_support/clean_data.py @@ -17,4 +17,4 @@ utility.drop_collection("hello_milvus_pk") print(fmt.format(f"Drop collection `hello_milvus_pk_recover`")) -utility.drop_collection("hello_milvus_recover_pk") \ No newline at end of file +utility.drop_collection("hello_milvus_pk_recover") \ No newline at end of file diff --git a/example/partition_key_support/prepare_data.py b/example/partition_key_support/prepare_data.py index 0c6bb126..ec6e7e18 100644 --- a/example/partition_key_support/prepare_data.py +++ b/example/partition_key_support/prepare_data.py @@ -40,15 +40,15 @@ FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=dim) ] default_schema = CollectionSchema(fields=default_fields, description="test partition-key collection", partition_key_field="key") -hello_milvus = Collection(name="hello_milvus_pk", schema=default_schema, shard_num=1, num_partitions=100) +hello_milvus = Collection(name="hello_milvus_pk", schema=default_schema, shard_num=1, num_partitions=20) -nb = 100 +nb = 20 rng = np.random.default_rng(seed=19530) random_data = rng.random(nb).tolist() vec_data = [[random.random() for _ in range(dim)] for _ in range(nb)] -_len = int(100) +_len = int(20) _str = string.ascii_letters + string.digits _s = _str print("_str size ", len(_str)) diff --git a/example/prepare_data.py b/example/prepare_data.py index a6142241..531e0700 100644 --- a/example/prepare_data.py +++ b/example/prepare_data.py @@ -111,12 +111,12 @@ insert_result2 = hello_milvus2.insert(entities2) hello_milvus2.flush() -index_params = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"} -hello_milvus.create_index("embeddings", index_params) +# index_params = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"} +# hello_milvus.create_index("embeddings", index_params) +# hello_milvus2.create_index(field_name="var",index_name="scalar_index") - -index_params2 = {"index_type": "TRIE"} -hello_milvus2.create_index("var", index_params2) +# index_params2 = {"index_type": "Trie"} +# hello_milvus2.create_index("var", index_params2) print(f"Number of entities in hello_milvus2: {hello_milvus2.num_entities}") # check the num_entites diff --git a/go.mod b/go.mod index afaa9462..e3d48fad 100644 --- a/go.mod +++ b/go.mod @@ -3,31 +3,36 @@ module github.com/zilliztech/milvus-backup go 1.18 require ( + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0 + github.com/aliyun/credentials-go v1.3.0 github.com/blang/semver/v4 v4.0.0 github.com/cockroachdb/errors v1.9.1 github.com/gin-gonic/gin v1.8.1 github.com/golang/protobuf v1.5.2 github.com/google/btree v1.0.1 - github.com/google/uuid v1.1.2 + github.com/google/uuid v1.3.0 + github.com/json-iterator/go v1.1.12 github.com/lingdor/stackerror v0.0.0-20191119040541-976d8885ed76 - github.com/milvus-io/milvus-sdk-go/v2 v2.2.2 + github.com/milvus-io/milvus-sdk-go/v2 v2.3.0 github.com/minio/minio-go/v7 v7.0.17 github.com/pkg/errors v0.9.1 github.com/sony/sonyflake v1.1.0 github.com/spf13/cast v1.3.1 
github.com/spf13/cobra v1.5.0 github.com/spf13/viper v1.8.1 - github.com/stretchr/testify v1.8.1 + github.com/stretchr/testify v1.8.4 github.com/swaggo/files v1.0.0 github.com/swaggo/gin-swagger v1.5.3 - github.com/swaggo/swag v1.8.10 + github.com/swaggo/swag v1.16.1 github.com/uber/jaeger-client-go v2.25.0+incompatible go.etcd.io/etcd/client/v3 v3.5.0 go.uber.org/atomic v1.10.0 go.uber.org/zap v1.17.0 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 - golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 + golang.org/x/sync v0.3.0 golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 google.golang.org/grpc v1.48.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 @@ -35,7 +40,11 @@ require ( require ( cloud.google.com/go v0.81.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 // indirect github.com/KyleBanks/depth v1.2.1 // indirect + github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68 // indirect + github.com/alibabacloud-go/tea v1.1.8 // indirect github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f // indirect github.com/cockroachdb/redact v1.1.3 // indirect github.com/coreos/go-semver v0.3.0 // indirect @@ -45,29 +54,30 @@ require ( github.com/fsnotify/fsnotify v1.4.9 // indirect github.com/getsentry/sentry-go v0.12.0 // indirect github.com/gin-contrib/sse v0.1.0 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.20.0 // indirect - github.com/go-openapi/spec v0.20.4 // indirect - github.com/go-openapi/swag v0.19.15 // indirect + github.com/go-openapi/jsonpointer v0.20.0 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/spec v0.20.9 // indirect + github.com/go-openapi/swag v0.22.4 // indirect github.com/go-playground/locales v0.14.0 // indirect github.com/go-playground/universal-translator v0.18.0 // indirect github.com/go-playground/validator/v10 v10.10.0 // indirect github.com/goccy/go-json v0.9.7 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v5 v5.0.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.13.5 // indirect github.com/klauspost/cpuid v1.3.1 // indirect - github.com/kr/pretty v0.3.0 // indirect + github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/leodido/go-urn v1.2.1 // indirect github.com/magiconair/properties v1.8.5 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-isatty v0.0.14 // indirect - github.com/milvus-io/milvus-proto/go-api v0.0.0-20230522080721-2975bfe7a190 // indirect + github.com/milvus-io/milvus-proto/go-api/v2 v2.3.0 // indirect github.com/minio/md5-simd v1.1.0 // indirect github.com/minio/sha256-simd v0.1.1 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect @@ -76,8 +86,9 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pelletier/go-toml v1.9.3 // indirect github.com/pelletier/go-toml/v2 v2.0.1 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - 
github.com/rogpeppe/go-internal v1.8.1 // indirect + github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/rs/xid v1.2.1 // indirect github.com/sirupsen/logrus v1.8.1 // indirect github.com/spf13/afero v1.6.0 // indirect @@ -91,17 +102,17 @@ require ( go.etcd.io/etcd/api/v3 v3.5.0 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.0 // indirect go.uber.org/multierr v1.6.0 // indirect - golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 // indirect - golang.org/x/net v0.2.0 // indirect - golang.org/x/sys v0.2.0 // indirect - golang.org/x/text v0.4.0 // indirect - golang.org/x/tools v0.1.12 // indirect + golang.org/x/crypto v0.12.0 // indirect + golang.org/x/net v0.14.0 // indirect + golang.org/x/sys v0.11.0 // indirect + golang.org/x/text v0.12.0 // indirect + golang.org/x/tools v0.11.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220503193339-ba3ae3f07e29 // indirect - google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/protobuf v1.30.0 // indirect gopkg.in/ini.v1 v1.62.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) -replace github.com/milvus-io/milvus-sdk-go/v2 => github.com/wayblink/milvus-sdk-go/v2 v2.2.20 +replace github.com/milvus-io/milvus-sdk-go/v2 => github.com/wayblink/milvus-sdk-go/v2 v2.3.0-beta5 diff --git a/go.sum b/go.sum index 2a040a83..36329ecb 100644 --- a/go.sum +++ b/go.sum @@ -39,6 +39,22 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 h1:8q4SaHjFsClSvuVne0ID/5Ka8u3fcIHyqkLjcFpNRHQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1 h1:/iHxaJhsFr0+xVFfbMr5vxz848jyiWuIEDhYq3y5odY= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 h1:T8quHYlUGyb/oqtSTwqlCr1ilJHrDv+ZtpSfo+hm1BU= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1/go.mod h1:gLa1CL2RNE4s7M3yopJ/p0iq5DdY6Yv5ZUt9MTRZOQM= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4Qufg58y+qElGOt5qv0z1mURkRY= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0 h1:nVocQV40OQne5613EeLayJiRAJuKlBGy+m22qWG+WRg= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0/go.mod h1:7QJP7dr2wznCMeqIrhMgWGf7XpAQnVrJqDm9nvV3Cu4= +github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw72xHJc34BNNykqSOeEJDAWkhf0u12/Jk= +github.com/AzureAD/microsoft-authentication-library-for-go 
v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -57,6 +73,12 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68 h1:NqugFkGxx1TXSh/pBcU00Y6bljgDPaFdh5MUSeJ7e50= +github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68/go.mod h1:6pb/Qy8c+lqua8cFpEy7g39NRRqOWc3rOwAy8m5Y2BY= +github.com/alibabacloud-go/tea v1.1.8 h1:vFF0707fqjGiQTxrtMnIXRjOCvQXf49CuDVRtTopmwU= +github.com/alibabacloud-go/tea v1.1.8/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= +github.com/aliyun/credentials-go v1.3.0 h1:wfBNojfNJJyuHK3YUIIjRPwnlQIdmy/YMkia1XOnPtY= +github.com/aliyun/credentials-go v1.3.0/go.mod h1:8jKYhQuDawt8x2+fusqa1Y6mPxemTsBEN04dgcAcYz0= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= @@ -107,6 +129,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= @@ -152,16 +175,22 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= +github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= -github.com/go-openapi/jsonreference v0.20.0 
h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= +github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= +github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= @@ -184,7 +213,11 @@ github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= +github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= +github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= +github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -251,12 +284,14 @@ github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod 
h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0= +github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= @@ -333,12 +368,15 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= @@ -369,8 +407,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5 github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/milvus-io/milvus-proto/go-api v0.0.0-20230522080721-2975bfe7a190 h1:ZREJhOMgAvXs+K0ain51ibxAtCB8Lnn3EBZFHEykIlk= -github.com/milvus-io/milvus-proto/go-api v0.0.0-20230522080721-2975bfe7a190/go.mod h1:148qnlmZ0Fdm1Fq+Mj/OW2uDoEP25g3mjh0vMGtkgmk= +github.com/milvus-io/milvus-proto/go-api/v2 v2.3.0 h1:t5CKm7+FXuD2rDLv/H8tpN9iY8F2dZvHF87xWBx8muU= +github.com/milvus-io/milvus-proto/go-api/v2 v2.3.0/go.mod h1:1OIl0v5PQeNxIJhCvY+K55CBUOYDZevw9g9380u1Wek= github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4= github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= github.com/minio/minio-go/v7 v7.0.17 h1:5SiS3pqiQDbNhmXMxtqn2HzAInbN5cbHT7ip9F0F07E= @@ -420,6 +458,8 @@ github.com/pelletier/go-toml/v2 v2.0.1 h1:8e3L2cCQzLFi2CR4g7vGFuFxX7Jl1kKX8gW+iV github.com/pelletier/go-toml/v2 v2.0.1/go.mod 
h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -448,8 +488,9 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= -github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= @@ -466,8 +507,9 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.1.0 h1:MkTeG1DMwsrdH7QtLXy5W+fUxWq+vmb6cLmyJ7aRtF0= +github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/sony/sonyflake v1.1.0 h1:wnrEcL3aOkWmPlhScLEGAXKkLAIslnBteNUq4Bw6MM4= @@ -503,8 +545,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.2.0 
h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a/go.mod h1:lKJPbtWzJ9JhsTN1k1gZgleJWY/cqq0psdoMmaThG3w= @@ -513,8 +556,8 @@ github.com/swaggo/files v1.0.0/go.mod h1:N59U6URJLyU1PQgFqPM7wXLMhJx7QAolnvfQkqO github.com/swaggo/gin-swagger v1.5.3 h1:8mWmHLolIbrhJJTflsaFoZzRBYVmEE7JZGIq08EiC0Q= github.com/swaggo/gin-swagger v1.5.3/go.mod h1:3XJKSfHjDMB5dBo/0rrTXidPmgLeqsX89Yp4uA50HpI= github.com/swaggo/swag v1.8.1/go.mod h1:ugemnJsPZm/kRwFUnzBlbHRd0JY9zE1M4F+uy2pAaPQ= -github.com/swaggo/swag v1.8.10 h1:eExW4bFa52WOjqRzRD58bgWsWfdFJso50lpbeTcmTfo= -github.com/swaggo/swag v1.8.10/go.mod h1:ezQVUUhly8dludpVk+/PuwJWvLLanB13ygV5Pr9enSk= +github.com/swaggo/swag v1.16.1 h1:fTNRhKstPKxcnoKsytm4sahr8FaYzUcT7i1/3nd/fBg= +github.com/swaggo/swag v1.16.1/go.mod h1:9/LMvHycG3NFHfR6LwvikHv5iFvmPADQ359cKikGxto= github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM= github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= @@ -537,8 +580,8 @@ github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBn github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= -github.com/wayblink/milvus-sdk-go/v2 v2.2.20 h1:kZCw4jL7AUpctlYfoXANtu+x4BFPHvBg6Xjps3PBUHM= -github.com/wayblink/milvus-sdk-go/v2 v2.2.20/go.mod h1:bFT+53/Uc+pWP85UJkxnWQs3H78q4JQd/39ed+BEGhU= +github.com/wayblink/milvus-sdk-go/v2 v2.3.0-beta5 h1:2hPosJlUEWFHVD85iNE0ywYhv/EIY4YK/0ZTsOzQE/E= +github.com/wayblink/milvus-sdk-go/v2 v2.3.0-beta5/go.mod h1:/o0HejRTQciRiHniFDMJiCHswVn1UbFChgtlNRRChd4= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= @@ -593,8 +636,9 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -631,8 +675,8 @@ golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -680,8 +724,10 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -706,8 +752,9 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -767,6 +814,7 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -776,12 +824,15 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -791,8 +842,10 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -853,8 +906,9 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= -golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= 
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8= +golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -975,8 +1029,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -989,6 +1043,7 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= diff --git a/internal/common/workerpool.go b/internal/common/workerpool.go index 73286a1b..fd3319cb 100644 --- a/internal/common/workerpool.go +++ b/internal/common/workerpool.go @@ -4,6 +4,8 @@ import ( "context" "errors" "fmt" + "go.uber.org/atomic" + "sync" "time" "golang.org/x/sync/errgroup" @@ -12,16 +14,25 @@ import ( // WorkerPool a pool that can control the total amount and rate of concurrency type WorkerPool struct { - job chan Job + job chan JobWithId g *errgroup.Group subCtx context.Context workerNum int lim *rate.Limiter + + nextId atomic.Int64 + jobsStatus sync.Map + jobsError sync.Map } type Job func(ctx context.Context) error +type JobWithId struct { + job Job + id int64 +} + // NewWorkerPool build a worker pool, rps 0 is unlimited func NewWorkerPool(ctx context.Context, workerNum int, rps int32) (*WorkerPool, error) { if workerNum <= 0 { @@ -36,13 +47,19 @@ func NewWorkerPool(ctx context.Context, workerNum int, rps int32) (*WorkerPool, lim = rate.NewLimiter(rate.Every(time.Second/time.Duration(rps)), 1) } - return &WorkerPool{job: make(chan Job), workerNum: workerNum, g: g, lim: lim, subCtx: subCtx}, nil + return &WorkerPool{job: make(chan JobWithId), workerNum: workerNum, g: g, lim: lim, subCtx: subCtx}, nil +} + +func (p 
*WorkerPool) Start() {
+	p.g.Go(p.work)
+	p.nextId = atomic.Int64{}
+}
-func (p *WorkerPool) Start() { p.g.Go(p.work) }
 func (p *WorkerPool) work() error {
 	for j := range p.job {
-		job := j
+		jobWithId := j
 		p.g.Go(func() error {
 			if p.lim != nil {
 				if err := p.lim.Wait(p.subCtx); err != nil {
@@ -50,16 +67,54 @@ func (p *WorkerPool) work() error {
 				}
 			}

-			if err := job(p.subCtx); err != nil {
+			if err := jobWithId.job(p.subCtx); err != nil {
+				// record the failure and mark the job finished so WaitJobs can observe it
+				p.jobsError.Store(jobWithId.id, err)
+				p.jobsStatus.Store(jobWithId.id, "done")
 				return fmt.Errorf("workerpool: execute job %w", err)
 			}
-
+			p.jobsStatus.Store(jobWithId.id, "done")
 			return nil
 		})
 	}
 	return nil
 }

-func (p *WorkerPool) Submit(job Job) { p.job <- job }
-func (p *WorkerPool) Done()          { close(p.job) }
-func (p *WorkerPool) Wait() error    { return p.g.Wait() }
+// Submit schedules a job without returning its id; use SubmitWithId when the caller needs to wait on it.
+func (p *WorkerPool) Submit(job Job) {
+	jobId := p.nextId.Inc()
+	p.job <- JobWithId{job: job, id: jobId}
+}
+func (p *WorkerPool) Done()       { close(p.job) }
+func (p *WorkerPool) Wait() error { return p.g.Wait() }
+
+// SubmitWithId schedules a job and returns the id that can later be passed to WaitJobs.
+func (p *WorkerPool) SubmitWithId(job Job) int64 {
+	jobId := p.nextId.Inc()
+	p.job <- JobWithId{job: job, id: jobId}
+	return jobId
+}
+
+// WaitJobs blocks until every job in jobIds has finished and returns the first recorded error, if any.
+func (p *WorkerPool) WaitJobs(jobIds []int64) error {
+	for {
+		done := true
+		for _, jobId := range jobIds {
+			if jobError, exist := p.jobsError.Load(jobId); exist {
+				return jobError.(error)
+			}
+			if status, ok := p.jobsStatus.Load(jobId); !ok || status != "done" {
+				done = false
+				break
+			}
+		}
+		if done {
+			return nil
+		}
+		// poll instead of busy-spinning while some jobs are still running
+		time.Sleep(10 * time.Millisecond)
+	}
+}
diff --git a/internal/common/workerpool_test.go b/internal/common/workerpool_test.go
index 99bb5537..1c2112da 100644
--- a/internal/common/workerpool_test.go
+++ b/internal/common/workerpool_test.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"testing"
+	"time"

 	"github.com/stretchr/testify/assert"
 	"go.uber.org/atomic"
@@ -48,3 +49,30 @@ func TestRunTaskReturnErr(t *testing.T) {
 	wp.Done()
 	assert.NotNil(t, wp.Wait())
 }
+
+func TestWaitJobs(t *testing.T) {
+	wp, err := NewWorkerPool(context.Background(), 3, 10)
+	assert.Nil(t, err)
+
+	wp.Start()
+	start := time.Now().Unix()
+	jobs := make([]int64, 0)
+	for i := 0; i < 10; i++ {
+		job := func(ctx context.Context) error {
+			time.Sleep(2 * time.Second)
+			return nil
+		}
+		id := wp.SubmitWithId(job)
+		jobs = append(jobs, id)
+	}
+
+	err = wp.WaitJobs(jobs)
+	assert.NoError(t, err)
+
+	// 10 jobs of 2s each on 3 workers need at least 4 rounds, i.e. roughly 8 seconds
+	duration := time.Now().Unix() - start
+	assert.True(t, duration >= 8)
+}
diff --git a/internal/log/log.go b/internal/log/log.go
index 97bd4acd..8cb9a7ec 100644
--- a/internal/log/log.go
+++ b/internal/log/log.go
@@ -130,7 +130,7 @@ func initFileLog(cfg *FileLogConfig) (*lumberjack.Logger, error) {
 }

 func newStdLogger() (*zap.Logger, *ZapProperties) {
-	conf := &Config{Level: "debug", File: FileLogConfig{}}
+	conf := &Config{Level: "info", File: FileLogConfig{}}
 	lg, r, _ := InitLogger(conf, zap.AddCallerSkip(1))
 	return lg, r
 }
diff --git a/main.go b/main.go
index 4ca73107..96f1bd3b 100644
--- a/main.go
+++ b/main.go
@@ -5,6 +5,12 @@ import (
 	_ "github.com/zilliztech/milvus-backup/docs"
 )

+var (
+	version = "dev"
+	commit  = "unknown"
+	date    = "unknown"
+)
+
 // @title Milvus Backup Service
 // @version 1.0
 // @description A data backup & restore tool for Milvus
@@ -14,5 +20,6
@@ import ( // @license.url http://www.apache.org/licenses/LICENSE-2.0.html // @BasePath /api/v1 func main() { + cmd.SetVersionInfo(version, commit, date) cmd.Execute() } diff --git a/proto_gen_go.sh b/proto_gen_go.sh index abe63ff7..dea44c17 100755 --- a/proto_gen_go.sh +++ b/proto_gen_go.sh @@ -34,4 +34,8 @@ sed -i "" -e "s/data,omitempty/data/g" ./backuppb/backup.pb.go # remove size omitempty sed -i "" -e "s/size,omitempty/size/g" ./backuppb/backup.pb.go +# to make db_collections field compatible to both json and string +#sed -i "" -e "s/*_struct.Value/interface{}/g" ./backuppb/backup.pb.go +#sed -i "" '/_struct "github.com\/golang\/protobuf\/ptypes\/struct"/d' ./backuppb/backup.pb.go + popd diff --git a/tests/base/client_base.py b/tests/base/client_base.py index 3063dec1..278bf59c 100644 --- a/tests/base/client_base.py +++ b/tests/base/client_base.py @@ -249,7 +249,7 @@ def collection_insert_multi_segments_one_shard(self, collection_prefix, num_of_s return collection_w def prepare_data(self, name=None, db_name="default", nb=ct.default_nb, dim=ct.default_dim, is_binary=False, auto_id=False, - primary_field=ct.default_int64_field_name, is_flushed=True, check_function=False): + primary_field=ct.default_int64_field_name, is_flushed=True, check_function=False, enable_partition=False): """ prepare data for test case """ @@ -262,6 +262,10 @@ def prepare_data(self, name=None, db_name="default", nb=ct.default_nb, dim=ct.de default_schema = cf.gen_default_binary_collection_schema(auto_id=auto_id, dim=dim, primary_field=primary_field) collection_w = self.init_collection_wrap(name=name, schema=default_schema, active_trace=True) + # create partitions + if enable_partition: + for i in range(5): + partition_w = self.init_partition_wrap(collection_wrap=collection_w) assert collection_w.name == name if nb > 0: cf.insert_data(collection_w, nb=nb, is_binary=is_binary, auto_id=auto_id, dim=dim) @@ -343,6 +347,11 @@ def compare_collections(self, src_name, dist_name, output_fields=None): f"collection_src num_entities: {collection_src.num_entities} != " \ f"collection_dist num_entities: {collection_dist.num_entities}" assert collection_src.schema == collection_dist.schema + # get partitions + partitions_src = collection_src.partitions + partitions_dist = collection_dist.partitions + log.info(f"partitions_src: {partitions_src}, partitions_dist: {partitions_dist}") + assert len(partitions_src) == len(partitions_dist) for coll in [collection_src, collection_dist]: is_binary = self.is_binary_by_schema(coll.schema) diff --git a/tests/common/common_type.py b/tests/common/common_type.py index 6249f846..6a57cd80 100644 --- a/tests/common/common_type.py +++ b/tests/common/common_type.py @@ -242,5 +242,4 @@ class CaseLabel: L1 = "L1" L2 = "L2" L3 = "L3" - Loadbalance = "Loadbalance" # loadbalance testcases which need to be run in multi query nodes - ClusterOnly = "ClusterOnly" # For functions only suitable to cluster mode + Perf = "Perf" diff --git a/tests/testcases/test_backup_e2e.py b/tests/testcases/test_backup_e2e.py index c1b084fd..74a877da 100644 --- a/tests/testcases/test_backup_e2e.py +++ b/tests/testcases/test_backup_e2e.py @@ -17,7 +17,7 @@ class TestE2e(TestcaseBase): """ Test case of end to end""" - @pytest.mark.tags(CaseLabel.L1) + @pytest.mark.tags(CaseLabel.L0) def test_milvus_backup_default(self): # prepare data name_origin = cf.gen_unique_str(c_name_prefix) diff --git a/tests/testcases/test_backup_perf.py b/tests/testcases/test_backup_perf.py index 318e1b59..5ec2735b 100644 --- 
a/tests/testcases/test_backup_perf.py +++ b/tests/testcases/test_backup_perf.py @@ -3,7 +3,6 @@ from base.client_base import TestcaseBase from common import common_func as cf -from common import common_type as ct from common.common_type import CaseLabel from utils.util_log import test_log as log from api.milvus_backup import MilvusBackupClient @@ -14,18 +13,21 @@ client = MilvusBackupClient("http://localhost:8080/api/v1") +@pytest.mark.tags(CaseLabel.Perf) class TestPerf(TestcaseBase): """ Test case of performance""" - @pytest.mark.tags(CaseLabel.L3) def test_milvus_create_backup_perf(self): # prepare data - total_nb = 1000000 + total_nb = 10000 cnt = 10 - collection_to_backup = cf.gen_unique_str(c_name_prefix) - for i in range(cnt): - self.prepare_data(collection_to_backup, nb=total_nb // cnt) - collections_to_backup = [collection_to_backup] + coll_num = 2 + collections_to_backup = [] + for i in range(coll_num): + collection_to_backup = cf.gen_unique_str(c_name_prefix) + for j in range(cnt): + self.prepare_data(collection_to_backup, nb=total_nb // cnt) + collections_to_backup.append(collection_to_backup) checkers = { Op.create: BackupCreateChecker(collections_to_backup) } @@ -39,15 +41,17 @@ def test_milvus_create_backup_perf(self): sleep(10) log.info("*********************Perf Test End**********************") - @pytest.mark.tags(CaseLabel.L3) def test_milvus_restore_backup_perf(self): # prepare data - total_nb = 1000000 + total_nb = 10000 cnt = 10 - collection_to_backup = cf.gen_unique_str(c_name_prefix) - for i in range(cnt): - self.prepare_data(collection_to_backup, nb=total_nb // cnt) - collections_to_backup = [collection_to_backup] + coll_num = 2 + collections_to_backup = [] + for i in range(coll_num): + collection_to_backup = cf.gen_unique_str(c_name_prefix) + for j in range(cnt): + self.prepare_data(collection_to_backup, nb=total_nb // cnt) + collections_to_backup.append(collection_to_backup) backup_name = cf.gen_unique_str(backup_prefix) suffix = "_bak" diff --git a/tests/testcases/test_restore_backup.py b/tests/testcases/test_restore_backup.py index a37061e4..5b21dd4c 100644 --- a/tests/testcases/test_restore_backup.py +++ b/tests/testcases/test_restore_backup.py @@ -1,7 +1,9 @@ import time import pytest +import json import numpy as np -from pymilvus import db +from collections import defaultdict +from pymilvus import db, list_collections, Collection from base.client_base import TestcaseBase from common import common_func as cf from common import common_type as ct @@ -15,31 +17,32 @@ client = MilvusBackupClient("http://localhost:8080/api/v1") +@pytest.mark.tags(CaseLabel.L0) class TestRestoreBackup(TestcaseBase): """ Test case of end to end""" - @pytest.mark.tags(CaseLabel.L1) - @pytest.mark.parametrize("nb", [0, 3000]) + @pytest.mark.parametrize("nb", [3000]) @pytest.mark.parametrize("is_auto_id", [True, False]) + @pytest.mark.parametrize("enable_partition", [False]) @pytest.mark.parametrize("is_async", [True, False]) @pytest.mark.parametrize("collection_need_to_restore", [1, 2, 3]) @pytest.mark.parametrize("collection_type", ["binary", "float", "all"]) - def test_milvus_restore_back(self, collection_type, collection_need_to_restore, is_async, is_auto_id, nb): + def test_milvus_restore_back(self, collection_type, collection_need_to_restore, is_async, is_auto_id, enable_partition, nb): # prepare data names_origin = [] back_up_name = cf.gen_unique_str(backup_prefix) if collection_type == "all": for is_binary in [True, False, False]: names_origin.append(cf.gen_unique_str(prefix)) - 
self.prepare_data(names_origin[-1], nb=nb, is_binary=is_binary, auto_id=is_auto_id, check_function=False) + self.prepare_data(names_origin[-1], nb=nb, is_binary=is_binary, auto_id=is_auto_id, check_function=False, enable_partition=enable_partition) if collection_type == "float": for is_binary in [False, False, False]: names_origin.append(cf.gen_unique_str(prefix)) - self.prepare_data(names_origin[-1], nb=nb, is_binary=is_binary, auto_id=is_auto_id, check_function=False) + self.prepare_data(names_origin[-1], nb=nb, is_binary=is_binary, auto_id=is_auto_id, check_function=False, enable_partition=enable_partition) if collection_type == "binary": for is_binary in [True, True, True]: names_origin.append(cf.gen_unique_str(prefix)) - self.prepare_data(names_origin[-1], nb=nb, is_binary=is_binary, auto_id=is_auto_id, check_function=False) + self.prepare_data(names_origin[-1], nb=nb, is_binary=is_binary, auto_id=is_auto_id, check_function=False, enable_partition=enable_partition) log.info(f"name_origin:{names_origin}, back_up_name: {back_up_name}") for name in names_origin: res, _ = self.utility_wrap.has_collection(name) @@ -76,6 +79,66 @@ def test_milvus_restore_back(self, collection_type, collection_need_to_restore, for name in restore_collections: self.compare_collections(name, name+suffix) + @pytest.mark.tags(CaseLabel.L1) + @pytest.mark.parametrize("nb", [3000]) + @pytest.mark.parametrize("is_auto_id", [True]) + @pytest.mark.parametrize("enable_partition", [True]) + @pytest.mark.parametrize("is_async", [True]) + @pytest.mark.parametrize("collection_need_to_restore", [3]) + @pytest.mark.parametrize("collection_type", ["all"]) + def test_milvus_restore_back_with_multi_partition(self, collection_type, collection_need_to_restore, is_async, is_auto_id, enable_partition, nb): + # prepare data + names_origin = [] + back_up_name = cf.gen_unique_str(backup_prefix) + if collection_type == "all": + for is_binary in [True, False, False]: + names_origin.append(cf.gen_unique_str(prefix)) + self.prepare_data(names_origin[-1], nb=nb, is_binary=is_binary, auto_id=is_auto_id, check_function=False, enable_partition=enable_partition) + if collection_type == "float": + for is_binary in [False, False, False]: + names_origin.append(cf.gen_unique_str(prefix)) + self.prepare_data(names_origin[-1], nb=nb, is_binary=is_binary, auto_id=is_auto_id, check_function=False, enable_partition=enable_partition) + if collection_type == "binary": + for is_binary in [True, True, True]: + names_origin.append(cf.gen_unique_str(prefix)) + self.prepare_data(names_origin[-1], nb=nb, is_binary=is_binary, auto_id=is_auto_id, check_function=False, enable_partition=enable_partition) + log.info(f"name_origin:{names_origin}, back_up_name: {back_up_name}") + for name in names_origin: + res, _ = self.utility_wrap.has_collection(name) + assert res is True + # create backup + + names_need_backup = names_origin + payload = {"async": False, "backup_name": back_up_name, "collection_names": names_need_backup} + res = client.create_backup(payload) + log.info(f"create backup response: {res}") + backup = client.get_backup(back_up_name) + assert backup["data"]["name"] == back_up_name + backup_collections = [backup["collection_name"]for backup in backup["data"]["collection_backups"]] + restore_collections = backup_collections + if collection_need_to_restore == "all": + payload = {"async": False, "backup_name": back_up_name, + "collection_suffix": suffix} + else: + restore_collections = names_need_backup[:collection_need_to_restore] + payload = {"async": 
False, "backup_name": back_up_name, + "collection_suffix": suffix, "collection_names": restore_collections} + t0 = time.time() + res = client.restore_backup(payload) + restore_id = res["data"]["id"] + log.info(f"restore_backup: {res}") + if is_async: + res = client.wait_restore_complete(restore_id) + assert res is True + t1 = time.time() + log.info(f"restore {restore_collections} cost time: {t1 - t0}") + res, _ = self.utility_wrap.list_collections() + for name in restore_collections: + assert name + suffix in res + for name in restore_collections: + self.compare_collections(name, name+suffix) + + @pytest.mark.tags(CaseLabel.L1) def test_milvus_restore_back_with_db_support(self): # prepare data self._connect() @@ -125,6 +188,7 @@ def test_milvus_restore_back_with_db_support(self): @pytest.mark.parametrize("include_partition_key", [True, False]) @pytest.mark.parametrize("include_dynamic", [True, False]) @pytest.mark.parametrize("include_json", [True, False]) + @pytest.mark.tags(CaseLabel.L1) def test_milvus_restore_back_with_new_feature_support(self, include_json, include_dynamic, include_partition_key): self._connect() name_origin = cf.gen_unique_str(prefix) @@ -196,3 +260,65 @@ def test_milvus_restore_back_with_new_feature_support(self, include_json, includ all_backup = [] assert back_up_name not in all_backup + @pytest.mark.parametrize("drop_db", [True, False]) + @pytest.mark.parametrize("str_json", [True, False]) + @pytest.mark.tags(CaseLabel.L1) + def test_milvus_restore_with_db_collections(self, drop_db, str_json): + # prepare data + self._connect() + names_origin = [] + db_collections = defaultdict(list) + for i in range(2): + db_name = cf.gen_unique_str("db") + db.create_database(db_name) + db.using_database(db_name) + for j in range(2): + collection_name = cf.gen_unique_str(prefix) + self.prepare_data(name=collection_name, db_name=db_name, nb=3000, is_binary=False, auto_id=True) + assert collection_name in self.utility_wrap.list_collections()[0] + names_origin.append(f"{db_name}.{collection_name}") + db_collections[db_name].append(collection_name) + db_collections = dict(db_collections) + log.info(f"db_collections:{db_collections}") + log.info(f"name_origin:{names_origin}") + # create backup + back_up_name = cf.gen_unique_str(backup_prefix) + payload = { + "async": False, + "backup_name": back_up_name, + "db_collections": json.dumps(db_collections) if str_json else db_collections, + } + log.info(f"payload: {payload}") + res = client.create_backup(payload) + log.info(f"create backup response: {res}") + res = client.list_backup() + log.info(f"list_backup {res}") + if "data" in res: + all_backup = [r["name"] for r in res["data"]] + else: + all_backup = [] + assert back_up_name in all_backup + if drop_db: + # delete db to check that restore can create db if not exist + for db_name in db_collections: + db.using_database(db_name) + all_collections = list_collections() + for c in all_collections: + collection = Collection(name=c) + collection.drop() + db.drop_database(db_name) + payload = {"async": False, "backup_name": back_up_name, + "db_collections": db_collections, + "collection_suffix": suffix} + log.info(f"restore payload: {payload}") + res = client.restore_backup(payload) + log.info(f"restore_backup: {res}") + for name in names_origin: + db_name = name.split(".")[0] + collection_name = name.split(".")[1] + db.using_database(db_name) + res, _ = self.utility_wrap.list_collections() + log.info(f"collection list in db {db_name}: {res}") + assert collection_name + suffix in res + if not 
drop_db:
+                self.compare_collections(collection_name, collection_name + suffix)
diff --git a/ut_test.go b/ut_test.go
deleted file mode 100644
index 06ab7d0f..00000000
--- a/ut_test.go
+++ /dev/null
@@ -1 +0,0 @@
-package main
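For reference, a minimal usage sketch of the job-tracking API added to internal/common/workerpool.go in this patch. It only relies on the functions visible above (NewWorkerPool, Start, SubmitWithId, WaitJobs, Done, Wait); the import path and the job body are illustrative assumptions, not part of the change.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/zilliztech/milvus-backup/internal/common"
)

func main() {
	// 4 concurrent workers, at most 10 job starts per second (rps 0 would be unlimited).
	wp, err := common.NewWorkerPool(context.Background(), 4, 10)
	if err != nil {
		log.Fatal(err)
	}
	wp.Start()

	// Submit a batch of jobs and remember their ids so we can wait on just this batch.
	ids := make([]int64, 0, 8)
	for i := 0; i < 8; i++ {
		i := i
		id := wp.SubmitWithId(func(ctx context.Context) error {
			// placeholder work; a real job would copy one backup segment here
			fmt.Printf("job %d running\n", i)
			return nil
		})
		ids = append(ids, id)
	}

	// Block until this batch has finished; the first recorded job error is returned.
	if err := wp.WaitJobs(ids); err != nil {
		log.Fatal(err)
	}

	// No more jobs: close the channel and wait for the dispatcher and workers to drain.
	wp.Done()
	if err := wp.Wait(); err != nil {
		log.Fatal(err)
	}
}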
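TestWaitJobs above only exercises the success path. A possible companion test for error propagation, sketched as it might appear in workerpool_test.go and using only the API added in this patch (the test name and error message are illustrative):

func TestWaitJobsReturnErr(t *testing.T) {
	wp, err := NewWorkerPool(context.Background(), 3, 10)
	assert.Nil(t, err)
	wp.Start()

	// one failing job among several successful ones
	ids := make([]int64, 0)
	ids = append(ids, wp.SubmitWithId(func(ctx context.Context) error {
		return errors.New("some err")
	}))
	for i := 0; i < 5; i++ {
		ids = append(ids, wp.SubmitWithId(func(ctx context.Context) error {
			time.Sleep(100 * time.Millisecond)
			return nil
		}))
	}

	// WaitJobs should surface the error recorded for the failing job
	assert.Error(t, wp.WaitJobs(ids))
}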
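The restore tests above send db_collections either as a JSON object or as a JSON-encoded string (toggled by the str_json parameter). A small Go sketch of building both payload forms; the database, collection, and backup names are hypothetical.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// db_name -> collections to back up, mirroring the db_collections mapping built in the tests
	dbCollections := map[string][]string{
		"db_1": {"coll_a", "coll_b"},
		"db_2": {"coll_c"},
	}

	// form 1: embed the mapping as a JSON object
	asObject := map[string]interface{}{
		"async":          false,
		"backup_name":    "my_backup",
		"db_collections": dbCollections,
	}

	// form 2: embed the same mapping as a JSON-encoded string (str_json=True in the tests)
	encoded, _ := json.Marshal(dbCollections)
	asString := map[string]interface{}{
		"async":          false,
		"backup_name":    "my_backup",
		"db_collections": string(encoded),
	}

	for _, payload := range []map[string]interface{}{asObject, asString} {
		body, _ := json.Marshal(payload)
		fmt.Println(string(body))
	}
}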